sidekiq-unique-jobs 3.0.11 → 8.0.10

Sign up to get free protection for your applications and to get access to all the features.
Files changed (158) hide show
  1. checksums.yaml +5 -5
  2. data/CHANGELOG.md +2163 -25
  3. data/LICENSE.txt +21 -0
  4. data/README.md +984 -47
  5. data/bin/uniquejobs +7 -0
  6. data/lib/sidekiq-unique-jobs.rb +2 -36
  7. data/lib/sidekiq_unique_jobs/batch_delete.rb +120 -0
  8. data/lib/sidekiq_unique_jobs/changelog.rb +68 -0
  9. data/lib/sidekiq_unique_jobs/cli.rb +95 -0
  10. data/lib/sidekiq_unique_jobs/config.rb +306 -33
  11. data/lib/sidekiq_unique_jobs/connection.rb +20 -0
  12. data/lib/sidekiq_unique_jobs/constants.rb +55 -0
  13. data/lib/sidekiq_unique_jobs/core_ext.rb +132 -0
  14. data/lib/sidekiq_unique_jobs/deprecation.rb +65 -0
  15. data/lib/sidekiq_unique_jobs/digests.rb +134 -0
  16. data/lib/sidekiq_unique_jobs/exceptions.rb +105 -0
  17. data/lib/sidekiq_unique_jobs/expiring_digests.rb +14 -0
  18. data/lib/sidekiq_unique_jobs/job.rb +63 -0
  19. data/lib/sidekiq_unique_jobs/json.rb +47 -0
  20. data/lib/sidekiq_unique_jobs/key.rb +98 -0
  21. data/lib/sidekiq_unique_jobs/lock/base_lock.rb +165 -0
  22. data/lib/sidekiq_unique_jobs/lock/client_validator.rb +28 -0
  23. data/lib/sidekiq_unique_jobs/lock/server_validator.rb +27 -0
  24. data/lib/sidekiq_unique_jobs/lock/until_and_while_executing.rb +71 -0
  25. data/lib/sidekiq_unique_jobs/lock/until_executed.rb +48 -0
  26. data/lib/sidekiq_unique_jobs/lock/until_executing.rb +43 -0
  27. data/lib/sidekiq_unique_jobs/lock/until_expired.rb +42 -0
  28. data/lib/sidekiq_unique_jobs/lock/validator.rb +96 -0
  29. data/lib/sidekiq_unique_jobs/lock/while_executing.rb +70 -0
  30. data/lib/sidekiq_unique_jobs/lock/while_executing_reject.rb +21 -0
  31. data/lib/sidekiq_unique_jobs/lock.rb +348 -0
  32. data/lib/sidekiq_unique_jobs/lock_args.rb +127 -0
  33. data/lib/sidekiq_unique_jobs/lock_config.rb +132 -0
  34. data/lib/sidekiq_unique_jobs/lock_digest.rb +79 -0
  35. data/lib/sidekiq_unique_jobs/lock_info.rb +68 -0
  36. data/lib/sidekiq_unique_jobs/lock_timeout.rb +62 -0
  37. data/lib/sidekiq_unique_jobs/lock_ttl.rb +77 -0
  38. data/lib/sidekiq_unique_jobs/lock_type.rb +37 -0
  39. data/lib/sidekiq_unique_jobs/locksmith.rb +390 -0
  40. data/lib/sidekiq_unique_jobs/logging/middleware_context.rb +44 -0
  41. data/lib/sidekiq_unique_jobs/logging.rb +236 -0
  42. data/lib/sidekiq_unique_jobs/lua/delete.lua +49 -0
  43. data/lib/sidekiq_unique_jobs/lua/delete_by_digest.lua +39 -0
  44. data/lib/sidekiq_unique_jobs/lua/delete_job_by_digest.lua +38 -0
  45. data/lib/sidekiq_unique_jobs/lua/find_digest_in_queues.lua +26 -0
  46. data/lib/sidekiq_unique_jobs/lua/lock.lua +108 -0
  47. data/lib/sidekiq_unique_jobs/lua/lock_until_expired.lua +92 -0
  48. data/lib/sidekiq_unique_jobs/lua/locked.lua +35 -0
  49. data/lib/sidekiq_unique_jobs/lua/queue.lua +88 -0
  50. data/lib/sidekiq_unique_jobs/lua/reap_orphans.lua +119 -0
  51. data/lib/sidekiq_unique_jobs/lua/shared/_common.lua +35 -0
  52. data/lib/sidekiq_unique_jobs/lua/shared/_current_time.lua +8 -0
  53. data/lib/sidekiq_unique_jobs/lua/shared/_delete_from_queue.lua +22 -0
  54. data/lib/sidekiq_unique_jobs/lua/shared/_delete_from_sorted_set.lua +29 -0
  55. data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_process_set.lua +53 -0
  56. data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_queues.lua +43 -0
  57. data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_sorted_set.lua +24 -0
  58. data/lib/sidekiq_unique_jobs/lua/shared/_hgetall.lua +13 -0
  59. data/lib/sidekiq_unique_jobs/lua/shared/_upgrades.lua +3 -0
  60. data/lib/sidekiq_unique_jobs/lua/unlock.lua +112 -0
  61. data/lib/sidekiq_unique_jobs/lua/update_version.lua +40 -0
  62. data/lib/sidekiq_unique_jobs/lua/upgrade.lua +66 -0
  63. data/lib/sidekiq_unique_jobs/middleware/client.rb +42 -0
  64. data/lib/sidekiq_unique_jobs/middleware/server.rb +31 -0
  65. data/lib/sidekiq_unique_jobs/middleware.rb +41 -15
  66. data/lib/sidekiq_unique_jobs/normalizer.rb +17 -0
  67. data/lib/sidekiq_unique_jobs/on_conflict/log.rb +24 -0
  68. data/lib/sidekiq_unique_jobs/on_conflict/null_strategy.rb +16 -0
  69. data/lib/sidekiq_unique_jobs/on_conflict/raise.rb +17 -0
  70. data/lib/sidekiq_unique_jobs/on_conflict/reject.rb +75 -0
  71. data/lib/sidekiq_unique_jobs/on_conflict/replace.rb +82 -0
  72. data/lib/sidekiq_unique_jobs/on_conflict/reschedule.rb +39 -0
  73. data/lib/sidekiq_unique_jobs/on_conflict/strategy.rb +51 -0
  74. data/lib/sidekiq_unique_jobs/on_conflict.rb +44 -0
  75. data/lib/sidekiq_unique_jobs/options_with_fallback.rb +78 -0
  76. data/lib/sidekiq_unique_jobs/orphans/lua_reaper.rb +29 -0
  77. data/lib/sidekiq_unique_jobs/orphans/manager.rb +242 -0
  78. data/lib/sidekiq_unique_jobs/orphans/null_reaper.rb +24 -0
  79. data/lib/sidekiq_unique_jobs/orphans/observer.rb +42 -0
  80. data/lib/sidekiq_unique_jobs/orphans/reaper.rb +115 -0
  81. data/lib/sidekiq_unique_jobs/orphans/reaper_resurrector.rb +170 -0
  82. data/lib/sidekiq_unique_jobs/orphans/ruby_reaper.rb +313 -0
  83. data/lib/sidekiq_unique_jobs/redis/entity.rb +112 -0
  84. data/lib/sidekiq_unique_jobs/redis/hash.rb +56 -0
  85. data/lib/sidekiq_unique_jobs/redis/list.rb +32 -0
  86. data/lib/sidekiq_unique_jobs/redis/set.rb +32 -0
  87. data/lib/sidekiq_unique_jobs/redis/sorted_set.rb +102 -0
  88. data/lib/sidekiq_unique_jobs/redis/string.rb +51 -0
  89. data/lib/sidekiq_unique_jobs/redis.rb +11 -0
  90. data/lib/sidekiq_unique_jobs/reflectable.rb +26 -0
  91. data/lib/sidekiq_unique_jobs/reflections.rb +79 -0
  92. data/lib/sidekiq_unique_jobs/rspec/matchers/have_valid_sidekiq_options.rb +51 -0
  93. data/lib/sidekiq_unique_jobs/rspec/matchers.rb +26 -0
  94. data/lib/sidekiq_unique_jobs/script/caller.rb +133 -0
  95. data/lib/sidekiq_unique_jobs/script/client.rb +94 -0
  96. data/lib/sidekiq_unique_jobs/script/config.rb +68 -0
  97. data/lib/sidekiq_unique_jobs/script/dsl.rb +60 -0
  98. data/lib/sidekiq_unique_jobs/script/logging.rb +95 -0
  99. data/lib/sidekiq_unique_jobs/script/lua_error.rb +96 -0
  100. data/lib/sidekiq_unique_jobs/script/script.rb +75 -0
  101. data/lib/sidekiq_unique_jobs/script/scripts.rb +123 -0
  102. data/lib/sidekiq_unique_jobs/script/template.rb +41 -0
  103. data/lib/sidekiq_unique_jobs/script/timing.rb +35 -0
  104. data/lib/sidekiq_unique_jobs/script.rb +46 -0
  105. data/lib/sidekiq_unique_jobs/server.rb +62 -0
  106. data/lib/sidekiq_unique_jobs/sidekiq_unique_ext.rb +110 -37
  107. data/lib/sidekiq_unique_jobs/sidekiq_unique_jobs.rb +304 -0
  108. data/lib/sidekiq_unique_jobs/sidekiq_worker_methods.rb +84 -0
  109. data/lib/sidekiq_unique_jobs/testing.rb +132 -9
  110. data/lib/sidekiq_unique_jobs/timer_task.rb +299 -0
  111. data/lib/sidekiq_unique_jobs/timing.rb +58 -0
  112. data/lib/sidekiq_unique_jobs/unlockable.rb +43 -0
  113. data/lib/sidekiq_unique_jobs/update_version.rb +25 -0
  114. data/lib/sidekiq_unique_jobs/upgrade_locks.rb +152 -0
  115. data/lib/sidekiq_unique_jobs/version.rb +5 -1
  116. data/lib/sidekiq_unique_jobs/version_check.rb +114 -0
  117. data/lib/sidekiq_unique_jobs/web/helpers.rb +175 -0
  118. data/lib/sidekiq_unique_jobs/web/views/_paging.erb +10 -0
  119. data/lib/sidekiq_unique_jobs/web/views/changelogs.erb +60 -0
  120. data/lib/sidekiq_unique_jobs/web/views/lock.erb +110 -0
  121. data/lib/sidekiq_unique_jobs/web/views/locks.erb +59 -0
  122. data/lib/sidekiq_unique_jobs/web.rb +109 -0
  123. data/lib/sidekiq_unique_jobs.rb +83 -0
  124. data/lib/tasks/changelog.rake +23 -0
  125. metadata +157 -126
  126. data/.gitignore +0 -10
  127. data/.rspec +0 -3
  128. data/.rubocop.yml +0 -36
  129. data/.travis.yml +0 -25
  130. data/Appraisals +0 -20
  131. data/Gemfile +0 -5
  132. data/LICENSE +0 -22
  133. data/Rakefile +0 -11
  134. data/gemfiles/sidekiq_2.15.gemfile +0 -9
  135. data/gemfiles/sidekiq_2.16.gemfile +0 -9
  136. data/gemfiles/sidekiq_2.17.gemfile +0 -9
  137. data/gemfiles/sidekiq_3.0.gemfile +0 -9
  138. data/gemfiles/sidekiq_develop.gemfile +0 -9
  139. data/lib/sidekiq_unique_jobs/connectors/redis_pool.rb +0 -11
  140. data/lib/sidekiq_unique_jobs/connectors/sidekiq_redis.rb +0 -9
  141. data/lib/sidekiq_unique_jobs/connectors/testing.rb +0 -11
  142. data/lib/sidekiq_unique_jobs/connectors.rb +0 -16
  143. data/lib/sidekiq_unique_jobs/middleware/client/strategies/testing_inline.rb +0 -25
  144. data/lib/sidekiq_unique_jobs/middleware/client/strategies/unique.rb +0 -76
  145. data/lib/sidekiq_unique_jobs/middleware/client/unique_jobs.rb +0 -39
  146. data/lib/sidekiq_unique_jobs/middleware/server/unique_jobs.rb +0 -69
  147. data/lib/sidekiq_unique_jobs/payload_helper.rb +0 -42
  148. data/sidekiq-unique-jobs.gemspec +0 -27
  149. data/spec/lib/.sidekiq_testing_enabled_spec.rb.swp +0 -0
  150. data/spec/lib/client_spec.rb +0 -173
  151. data/spec/lib/middleware/server/unique_jobs_spec.rb +0 -81
  152. data/spec/lib/sidekiq_testing_enabled_spec.rb +0 -123
  153. data/spec/lib/sidekiq_unique_ext_spec.rb +0 -70
  154. data/spec/lib/unlock_order_spec.rb +0 -64
  155. data/spec/spec_helper.rb +0 -37
  156. data/spec/support/my_worker.rb +0 -13
  157. data/spec/support/sidekiq_meta.rb +0 -17
  158. data/spec/support/unique_worker.rb +0 -13
@@ -0,0 +1,88 @@
-- queue.lua
-- Attempts to queue `job_id` for the lock `digest`.
-- Returns:
--   * job_id   - when this job already holds the lock, is already queued, or
--                was queued successfully by this call.
--   * prev_jid - the previously queued job id, when the concurrency limit is
--                exceeded and this job cannot be queued.
-------- BEGIN keys ---------
local digest    = KEYS[1]
local queued    = KEYS[2]
local primed    = KEYS[3]
local locked    = KEYS[4]
local info      = KEYS[5]
local changelog = KEYS[6]
local digests   = KEYS[7]
-------- END keys ---------


-------- BEGIN lock arguments ---------
local job_id     = ARGV[1] -- The job_id that was previously primed
local pttl       = tonumber(ARGV[2])
local lock_type  = ARGV[3]
local limit      = tonumber(ARGV[4])
local lock_score = ARGV[5]
-------- END lock arguments -----------


-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[6])
local debug_lua    = tostring(ARGV[7]) == "1"
local max_history  = tonumber(ARGV[8])
local script_name  = tostring(ARGV[9]) .. ".lua"
--------- END injected arguments ---------


-------- BEGIN Variables --------
local queued_count = redis.call("LLEN", queued)
local locked_count = redis.call("HLEN", locked)
local within_limit = limit > locked_count
-- NOTE(review): removed the unused local `limit_exceeded` (`not within_limit`);
-- it was never referenced below.
-------- END Variables --------


-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
---------- END local functions ----------


-------- BEGIN queue.lua --------
log_debug("BEGIN queue with key:", digest, "for job:", job_id)

-- Already locked by this very job: treat as a duplicate, not an error.
if redis.call("HEXISTS", locked, job_id) == 1 then
  log_debug("HEXISTS", locked, job_id, "== 1")
  log("Duplicate")
  return job_id
end

local prev_jid = redis.call("GET", digest)
log_debug("job_id:", job_id, "prev_jid:", prev_jid)
if not prev_jid or prev_jid == false then
  -- No previous holder: claim the digest for this job.
  log_debug("SET", digest, job_id)
  redis.call("SET", digest, job_id)
elseif prev_jid == job_id then
  log_debug(digest, "already queued with job_id:", job_id)
  log("Duplicate")
  return job_id
else
  -- Another job holds the digest: allow a takeover only while both the
  -- locked and queued counts are under the configured limit.
  -- TODO: Consider constraining the total count of both locked and queued?
  if within_limit and queued_count < limit then
    log_debug("Within limit:", digest, "(", locked_count, "of", limit, ")", "queued (", queued_count, "of", limit, ")")
    log_debug("SET", digest, job_id, "(was", prev_jid, ")")
    redis.call("SET", digest, job_id)
  else
    log_debug("Limit exceeded:", digest, "(", locked_count, "of", limit, ")")
    log("Limit exceeded", prev_jid)
    return prev_jid
  end
end

log_debug("LPUSH", queued, job_id)
redis.call("LPUSH", queued, job_id)

-- The Sidekiq client should only set pttl for until_expired
-- The Sidekiq server should set pttl for all other jobs
if pttl and pttl > 0 then
  log_debug("PEXPIRE", digest, pttl)
  redis.call("PEXPIRE", digest, pttl)
  log_debug("PEXPIRE", queued, pttl)
  redis.call("PEXPIRE", queued, pttl)
end

log("Queued")
log_debug("END queue with key:", digest, "for job:", job_id)
return job_id
-------- END queue.lua --------
@@ -0,0 +1,119 @@
-- reap_orphans.lua
-- Phase 1: walks the digests sorted set and deletes the lock keys of any
-- digest that cannot be found in the schedule set, the retry set, any queue,
-- or any process' work set. Phase 2: deletes digests in the expiring set whose
-- score is <= current_time. Stops once `del_count` reaches `reaper_count`
-- (checked per page, so the cap can be overshot within a single page).
-- Returns the number of digests deleted.
redis.replicate_commands()

-------- BEGIN keys ---------
local digests_set = KEYS[1]          -- sorted set of lock digests to examine
local expiring_digests_set = KEYS[2] -- sorted set of digests scored by expiry time
local schedule_set = KEYS[3]         -- Sidekiq scheduled-jobs sorted set
local retry_set = KEYS[4]            -- Sidekiq retry sorted set
-------- END keys ---------

-------- BEGIN argv ---------
local reaper_count = tonumber(ARGV[1]) -- maximum digests to delete per run
local threshold = tonumber(ARGV[2])    -- forwarded to find_digest_in_process_set
-------- END argv ---------

-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[3])
local debug_lua = tostring(ARGV[4]) == "1"
local max_history = tonumber(ARGV[5])
local script_name = ARGV[6] .. ".lua"
local redisversion = ARGV[7]
--------- END injected arguments ---------


-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
<%= include_partial "shared/_find_digest_in_queues.lua" %>
<%= include_partial "shared/_find_digest_in_sorted_set.lua" %>
<%= include_partial "shared/_find_digest_in_process_set.lua" %>
---------- END local functions ----------


-------- BEGIN delete_orphaned.lua --------
log_debug("BEGIN")
local found = false
local per = 50                            -- page size for both passes
local total = redis.call("ZCARD", digests_set)
local index = 0
local del_count = 0
local redis_ver = toversion(redisversion) -- NOTE(review): unused below

repeat
  -- NOTE(review): "Interating" is a typo for "Iterating"; left as-is since it
  -- is a runtime log string.
  log_debug("Interating through:", digests_set, "for orphaned locks")
  local digests = redis.call("ZREVRANGE", digests_set, index, index + per -1)

  for _, digest in pairs(digests) do
    -- A digest is an orphan only if *none* of the four searches find it.
    log_debug("Searching for digest:", digest, "in", schedule_set)
    found = find_digest_in_sorted_set(schedule_set, digest)

    if found ~= true then
      log_debug("Searching for digest:", digest, "in", retry_set)
      found = find_digest_in_sorted_set(retry_set, digest)
    end

    if found ~= true then
      log_debug("Searching for digest:", digest, "in all queues")
      local queue = find_digest_in_queues(digest)

      if queue then
        log_debug("found digest:", digest, "in queue:", queue)
        found = true
      end
    end

    -- TODO: Add check for jobs checked out by process
    if found ~= true then
      log_debug("Searching for digest:", digest, "in process sets")
      found = find_digest_in_process_set(digest, threshold)
    end

    if found ~= true then
      -- Orphaned: unlink the digest plus every companion key, including the
      -- ":RUN" variants.
      local queued = digest .. ":QUEUED"
      local primed = digest .. ":PRIMED"
      local locked = digest .. ":LOCKED"
      local info = digest .. ":INFO"
      local run_digest = digest .. ":RUN"
      local run_queued = digest .. ":RUN:QUEUED"
      local run_primed = digest .. ":RUN:PRIMED"
      local run_locked = digest .. ":RUN:LOCKED"
      local run_info = digest .. ":RUN:INFO"

      redis.call("UNLINK", digest, queued, primed, locked, info, run_digest, run_queued, run_primed, run_locked, run_info)

      redis.call("ZREM", digests_set, digest)
      del_count = del_count + 1
    end
  end

  index = index + per
until index >= total or del_count >= reaper_count

-- Phase 2: expired digests (score between 0 and current_time).
if del_count < reaper_count then
  index = 0
  total = redis.call("ZCOUNT", expiring_digests_set, 0, current_time)
  repeat
    -- NOTE(review): ZRANGEBYSCORE's LIMIT clause is <offset count>; passing
    -- `index + per -1` as the count fetches ever larger pages as `index`
    -- grows, while members are simultaneously ZREM'd below — confirm this
    -- pagination is intended.
    local digests = redis.call("ZRANGEBYSCORE", expiring_digests_set, 0, current_time, "LIMIT", index, index + per -1)

    for _, digest in pairs(digests) do
      local queued = digest .. ":QUEUED"
      local primed = digest .. ":PRIMED"
      local locked = digest .. ":LOCKED"
      local info = digest .. ":INFO"
      local run_digest = digest .. ":RUN"
      local run_queued = digest .. ":RUN:QUEUED"
      local run_primed = digest .. ":RUN:PRIMED"
      local run_locked = digest .. ":RUN:LOCKED"
      local run_info = digest .. ":RUN:INFO"

      redis.call("UNLINK", digest, queued, primed, locked, info, run_digest, run_queued, run_primed, run_locked, run_info)

      redis.call("ZREM", expiring_digests_set, digest)
      del_count = del_count + 1
    end

    index = index + per
  until index >= total or del_count >= reaper_count
end

log_debug("END")
return del_count
@@ -0,0 +1,35 @@
-- shared/_common.lua
-- Helpers textually included into each script via <%= include_partial %>.
-- They close over variables the including script must declare first:
-- `debug_lua`, `script_name`, `max_history`, `changelog`, `current_time`,
-- `digest`, and `job_id`.

-- Parse a "major.minor.patch" Redis version string into a table of numbers.
-- Fields are nil when the string does not match.
local function toversion(version)
  local _, _, maj, min, pat = string.find(version, "(%d+)%.(%d+)%.(%d+)")

  return {
    ["version"] = version,
    ["major"]   = tonumber(maj),
    ["minor"]   = tonumber(min),
    ["patch"]   = tonumber(pat)
  }
end

-- Write a DEBUG-level line to the Redis server log, prefixed with the script
-- name. No-op unless `debug_lua` is true.
local function log_debug( ... )
  if debug_lua ~= true then return end

  local result = ""
  -- BUGFIX: the original iterated `ipairs(arg)`. The implicit `arg` vararg
  -- table is a Lua 5.0 relic — removed in Lua 5.2+ and present in 5.1 only
  -- via compile-time compatibility — and ipairs over it also stops at the
  -- first nil argument. select() visits every argument, nils included.
  for i = 1, select("#", ...) do
    result = result .. " " .. tostring(select(i, ...))
  end
  redis.log(redis.LOG_DEBUG, script_name .. " -" .. result)
end

-- Append a JSON entry to the `changelog` sorted set (scored by current_time),
-- trim it to `max_history` entries, and PUBLISH the entry on the channel of
-- the same name. No-op when max_history is falsy or zero.
local function log(message, prev_jid)
  if not max_history or max_history == 0 then return end
  local entry = cjson.encode({digest = digest, job_id = job_id, script = script_name, message = message, time = current_time, prev_jid = prev_jid })

  log_debug("ZADD", changelog, current_time, entry);
  redis.call("ZADD", changelog, current_time, entry);
  local total_entries = redis.call("ZCARD", changelog)
  -- Keep only the newest max_history entries.
  local removed_entries = redis.call("ZREMRANGEBYRANK", changelog, 0, -1 * max_history)
  if removed_entries > 0 then
    log_debug("Removing", removed_entries , "entries from changelog (total entries", total_entries, "exceeds max_history:", max_history ..")");
  end
  log_debug("PUBLISH", changelog, entry);
  redis.call("PUBLISH", changelog, entry);
end
@@ -0,0 +1,8 @@
-- Return the Redis server clock as a single number.
local function current_time()
  -- Redis TIME replies with { seconds, microseconds } since the epoch.
  local reply = redis.call("time")
  local seconds, micros = reply[1], reply[2]

  -- NOTE(review): joining the two parts with "." treats the microsecond
  -- field as a decimal fraction, so leading zeros are lost (e.g. 5 µs
  -- becomes ".5") — presumably callers only need coarse ordering; confirm.
  return tonumber(seconds .. "." .. micros)
end
@@ -0,0 +1,22 @@
-- Remove the first entry in the list `queue` whose payload contains the
-- literal string `digest`. Returns the removed item, or nil when no entry
-- matches. Pages through the list 50 items at a time.
local function delete_from_queue(queue, digest)
  local total = redis.call("LLEN", queue)
  local per = 50

  for index = 0, total, per do
    local items = redis.call("LRANGE", queue, index, index + per - 1)

    if #items == 0 then
      break
    end

    for _, item in pairs(items) do
      -- BUGFIX: plain-text search (4th argument `true`). The original let
      -- string.find treat `digest` as a Lua pattern, so digests containing
      -- pattern magic characters (e.g. "-", "%") could mis-match or raise.
      if string.find(item, digest, 1, true) then
        redis.call("LREM", queue, 1, item)

        return item
      end
    end
  end

  return nil
end
@@ -0,0 +1,29 @@
-- Remove the first member of the sorted set `name` that contains `digest`.
-- Returns the removed member, or nil when nothing matches.
local function delete_from_sorted_set(name, digest)
  -- Score of the digest in the global digests zset; used as a lower bound to
  -- narrow the scan when available. NOTE(review): key "uniquejobs:digests" is
  -- hard-coded here rather than passed in — confirm it always matches the
  -- configured digests key.
  local score = redis.call("ZSCORE", "uniquejobs:digests", digest)
  local total = redis.call("ZCARD", name)
  local per = 50

  for offset = 0, total, per do
    local items

    if score then
      -- NOTE(review): the BYSCORE/LIMIT form of ZRANGE requires Redis >= 6.2;
      -- the index-range branch below is the fallback when no score is known.
      items = redis.call("ZRANGE", name, score, "+inf", "BYSCORE", "LIMIT", offset, per)
    else
      items = redis.call("ZRANGE", name, offset, offset + per -1)
    end

    if #items == 0 then
      break
    end

    for _, item in pairs(items) do
      -- NOTE(review): `digest` is interpreted as a Lua pattern here; pattern
      -- magic characters in a digest would change matching — confirm digests
      -- never contain them.
      if string.find(item, digest) then
        redis.call("ZREM", name, item)

        return item
      end
    end
  end

  return nil
end
@@ -0,0 +1,53 @@
-- Search every Sidekiq process' work hash ("<process>:work") for `digest`.
-- Returns true when the digest is found in any in-progress job payload, or
-- when a payload's created_at is newer than `threshold`; false otherwise.
local function find_digest_in_process_set(digest, threshold)
  local process_cursor = 0
  local job_cursor = 0                    -- NOTE(review): unused below
  local pattern = "*" .. digest .. "*"    -- NOTE(review): unused below
  local found = false

  log_debug("Searching in process list",
            "for digest:", digest,
            "cursor:", process_cursor)

  repeat
    -- Page through the "processes" set of registered Sidekiq processes.
    local process_paginator = redis.call("SSCAN", "processes", process_cursor, "MATCH", "*")
    local next_process_cursor = process_paginator[1]
    local processes = process_paginator[2]
    log_debug("Found number of processes:", #processes, "next cursor:", next_process_cursor)

    for _, process in ipairs(processes) do
      local workers_key = process .. ":work"
      log_debug("searching in process set:", process,
                "for digest:", digest,
                "cursor:", process_cursor)

      -- Flat [field, value, field, value, ...] reply; values (odd offsets
      -- +1) are the job payload strings.
      local jobs = redis.call("HGETALL", workers_key)

      if #jobs == 0 then
        log_debug("No entries in:", workers_key)
      else
        for i = 1, #jobs, 2 do
          local jobstr = jobs[i +1]
          -- Compare with any ":RUN" suffix stripped from both sides so a
          -- runtime lock digest matches its base digest (and vice versa).
          -- NOTE(review): both gsub results are still treated as Lua
          -- patterns by string.find — confirm digests contain no magic chars.
          if string.find(string.gsub(jobstr, ':RUN', ''), string.gsub(digest, ':RUN', '')) then
            log_debug("Found digest", digest, "in:", workers_key)
            found = true
            break
          end

          -- NOTE(review): assumes the payload JSON always has
          -- payload.created_at — a malformed entry would raise here; confirm.
          local job = cjson.decode(jobstr)
          if job.payload.created_at > threshold then
            found = true
            break
          end
        end
      end

      if found == true then
        break
      end
    end

    process_cursor = next_process_cursor
  until found == true or process_cursor == "0"

  return found
end
@@ -0,0 +1,43 @@
-- Search every Sidekiq queue (keys matching "queue:*") for a job payload that
-- contains `digest`. Returns the matching job's own `queue` attribute
-- (decoded from its JSON payload), or nil when no queue contains it.
local function find_digest_in_queues(digest)
  local cursor = "0"
  local count = 50
  local result = nil
  local found = false

  repeat
    log_debug("searching all queues for a matching digest:", digest)
    local pagination = redis.call("SCAN", cursor, "MATCH", "queue:*", "COUNT", count)
    local next_cursor = pagination[1]
    local queues = pagination[2]

    for _, queue in ipairs(queues) do
      local per = 50
      local total = redis.call("LLEN", queue)
      local index = 0

      log_debug("searching in:", queue,
                "for digest:", digest,
                "from:", index,
                "to:", total,
                "(per: " .. per .. ")",
                "cursor:", cursor)

      -- BUGFIX: stop paging as soon as a match is found. The original kept
      -- scanning the rest of this queue and every remaining queue in the
      -- SCAN page, wasting work and potentially overwriting `result` with a
      -- later queue's match.
      while index < total and not found do
        local items = redis.call("LRANGE", queue, index, index + per -1)
        for _, item in pairs(items) do
          -- BUGFIX: plain-text search (4th argument `true`); the digest must
          -- not be interpreted as a Lua pattern.
          if string.find(item, digest, 1, true) then
            log_debug("Found digest:", digest, "in queue:", queue)
            result = cjson.decode(item).queue
            found = true
            break
          end
        end
        index = index + per
      end

      if found then break end
    end

    cursor = next_cursor
  until found == true or cursor == "0"

  return result
end
@@ -0,0 +1,24 @@
-- Report whether any member of the sorted set `name` contains `digest`.
-- Returns true on the first ZSCAN page with a match, false otherwise.
local function find_digest_in_sorted_set(name, digest)
  local pattern = "*" .. digest .. "*"
  local cursor = 0
  local count = 5
  local found = false

  log_debug("searching in:", name,
            "for digest:", digest,
            "cursor:", cursor)
  repeat
    -- Incrementally scan the zset for members glob-matching the digest.
    local reply = redis.call("ZSCAN", name, cursor, "MATCH", pattern, "COUNT", count)
    local next_cursor = reply[1]
    local matches = reply[2]

    if #matches > 0 then
      log_debug("Found digest", digest, "in zset:", name)
      found = true
    end

    cursor = next_cursor
  until found == true or cursor == "0"

  return found
end
@@ -0,0 +1,13 @@
-- Convert the flat HGETALL reply for `key` ([f1, v1, f2, v2, ...]) into a
-- Lua table mapping field -> value.
local hgetall = function (key)
  local reply = redis.call("HGETALL", key)
  local hash = {}

  -- The reply alternates field, value; step through it pairwise.
  for index = 1, #reply, 2 do
    hash[reply[index]] = reply[index + 1]
  end

  return hash
end
@@ -0,0 +1,3 @@
-- Placeholder for a v6 -> v7 data migration; included by upgrade.lua.
-- Intentionally empty — appears to be a stub awaiting a scripted migration
-- (TODO confirm no key-format change is required).
local function upgrade_v6_to_v7()

end
@@ -0,0 +1,112 @@
-- unlock.lua
-- Releases the lock held by `job_id` on `digest`. Returns job_id on success,
-- or nil when other jobs still hold the lock (in which case it yields to
-- them). The statement order below is significant: queued/primed cleanup must
-- happen before key unlinking and before the final LPUSH notification.
-------- BEGIN keys ---------
local digest = KEYS[1]
local queued = KEYS[2]
local primed = KEYS[3]
local locked = KEYS[4]
local info = KEYS[5]
local changelog = KEYS[6]
local digests = KEYS[7]
-------- END keys ---------


-------- BEGIN lock arguments ---------
local job_id = ARGV[1]
local pttl = tonumber(ARGV[2])       -- NOTE(review): unused in this script
local lock_type = ARGV[3]
local limit = tonumber(ARGV[4])
local lock_score = ARGV[5]           -- NOTE(review): unused in this script
-------- END lock arguments -----------


-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[6])
local debug_lua = tostring(ARGV[7]) == "1"
local max_history = tonumber(ARGV[8])
local script_name = tostring(ARGV[9]) .. ".lua"
local redisversion = ARGV[10]
--------- END injected arguments ---------


-------- BEGIN Variables --------
local queued_count = redis.call("LLEN", queued)
local primed_count = redis.call("LLEN", primed)
local locked_count = redis.call("HLEN", locked)
--------- END Variables ---------


-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
---------- END local functions ----------


--------- Begin unlock.lua ---------
log_debug("BEGIN unlock digest:", digest, "(job_id: " .. job_id ..")")

-- If this job does not hold the lock: either everything is empty (orphaned
-- lock — fall through and clean up) or other holders exist (yield: return nil
-- without touching their state).
log_debug("HEXISTS", locked, job_id)
if redis.call("HEXISTS", locked, job_id) == 0 then
  -- TODO: Improve orphaned lock detection
  if queued_count == 0 and primed_count == 0 and locked_count == 0 then
    log_debug("Orphaned lock")
  else
    local result = ""
    for i,v in ipairs(redis.call("HKEYS", locked)) do
      result = result .. v .. ","
    end
    result = locked .. " (" .. result .. ")"
    log("Yielding to: " .. result)
    log_debug("Yielding to", result, locked, "by job", job_id)
    return nil
  end
end

-- Just in case something went wrong
log_debug("LREM", queued, -1, job_id)
redis.call("LREM", queued, -1, job_id)

log_debug("LREM", primed, -1, job_id)
redis.call("LREM", primed, -1, job_id)

-- NOTE(review): parsed but never used below.
local redis_version = toversion(redisversion)

-- until_expired locks keep their digest until the TTL fires; all other lock
-- types drop it here.
if lock_type ~= "until_expired" then
  log_debug("UNLINK", digest, info)
  redis.call("UNLINK", digest, info)

  log_debug("HDEL", locked, job_id)
  redis.call("HDEL", locked, job_id)
end

if redis.call("LLEN", primed) == 0 then
  log_debug("UNLINK", primed)
  redis.call("UNLINK", primed)
end

-- Re-read after the HDEL above; intentionally shadows the count taken at the
-- top of the script.
local locked_count = redis.call("HLEN", locked)

if locked_count < 1 then
  log_debug("UNLINK", locked)
  redis.call("UNLINK", locked)
end

-- Drop the digest from the digests zset once no (or at most one) holder
-- remains; with a limit > 1 the digest is kept for the other slots.
if limit then
  if limit <= 1 and locked_count <= 1 then
    log_debug("ZREM", digests, digest)
    redis.call("ZREM", digests, digest)
  end
else
  if locked_count <= 1 then
    log_debug("ZREM", digests, digest)
    redis.call("ZREM", digests, digest)
  end
end

-- Push a token onto the queued list (short 5s TTL) — presumably to wake any
-- client blocked waiting to acquire this lock; confirm against locksmith.rb.
log_debug("LPUSH", queued, "1")
redis.call("LPUSH", queued, "1")

log_debug("PEXPIRE", queued, 5000)
redis.call("PEXPIRE", queued, 5000)

log("Unlocked")
log_debug("END unlock digest:", digest, "(job_id: " .. job_id ..")")
return job_id
--------- END unlock.lua ---------
@@ -0,0 +1,40 @@
-- update_version.lua
-- Stores `version` in `live_key`; when a different version was previously
-- live, archives it in `dead_key`. Returns true when the live version changed
-- (note: a Lua false return converts to a nil reply in Redis).
-------- BEGIN keys ---------
local live_key = KEYS[1]
local dead_key = KEYS[2]
-------- END keys ---------

-------- BEGIN lock arguments ---------
local version = ARGV[1]
-------- END lock arguments -----------

-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[2])
local debug_lua = tostring(ARGV[3]) == "1"
local max_history = tonumber(ARGV[4])
local script_name = tostring(ARGV[5]) .. ".lua"
--------- END injected arguments ---------

-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
---------- END local functions ----------


-------- BEGIN set_version.lua --------
log_debug("BEGIN setting version:", version)

local updated = false
-- Atomically swap in the new version and fetch the old one.
-- NOTE(review): GETSET is deprecated in Redis 6.2+ in favor of SET ... GET —
-- confirm minimum supported Redis before changing.
local old_version = redis.call("GETSET", live_key, version)

if not old_version then
  log_debug("No previous version found")
  updated = true
elseif old_version ~= version then
  log_debug("Old version:", old_version, "differs from:", version)
  redis.call("SET", dead_key, old_version)
  updated = true
end

return updated
-------- END set_version.lua --------
@@ -0,0 +1,66 @@
-- upgrade.lua
-- Migrates v6 lock data to the v7 layout: every "*:GRABBED" hash is copied to
-- its "*:LOCKED" counterpart and the old key is deleted. Returns nothing.
redis.replicate_commands()
-------- BEGIN keys ---------
local live_version = KEYS[1]
local dead_version = KEYS[2]
-------- END keys ---------

-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[5])
local debug_lua = tostring(ARGV[6]) == "1"
local max_history = tonumber(ARGV[7])
local script_name = tostring(ARGV[8]) .. ".lua"
local redisversion = ARGV[9]
--------- END injected arguments ---------

-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
<%= include_partial "shared/_upgrades.lua" %>
---------- END local functions ----------


local new_version = redis.call("GET", live_version)
local old_version = redis.call("GET", dead_version)
local redis_version = toversion(redisversion) -- NOTE(review): unused below
local upgraded = 0

-------- BEGIN upgrade.lua --------

log_debug("BEGIN upgrading from: ", old_version, "to:", new_version)

-- Loop through all uniquejobs "*:GRABBED" hashes and move their contents
-- to the corresponding "*:LOCKED" key.
local cursor = "0"
local per = 50
repeat
  local pagination = redis.call("SCAN", cursor, "MATCH", "*:GRABBED", "COUNT", per)
  local next_cursor = pagination[1]
  local grabbed_keys = pagination[2]

  for _, grabbed in ipairs(grabbed_keys) do
    -- BUGFIX: the original `grabbed.gsub(":GRABBED", ":LOCKED")` invoked
    -- string.gsub WITHOUT the subject string (":GRABBED" became the subject
    -- and ":LOCKED" the pattern, with no replacement), raising "bad argument"
    -- at runtime. Pass the subject explicitly; the single assignment discards
    -- gsub's extra substitution-count return value.
    local locked_key = string.gsub(grabbed, ":GRABBED", ":LOCKED")
    local locks = redis.call("HGETALL", grabbed)

    if #locks == 0 then
      log_debug("No entries in:", grabbed)
    else
      log_debug("HMSET", locked_key, unpack(locks))
      redis.call("HMSET", locked_key, unpack(locks))
    end

    log_debug("DEL", grabbed)
    redis.call("DEL", grabbed)

    upgraded = upgraded + 1
  end

  cursor = next_cursor
  if cursor == "0" then
    log_debug("Looped through all grabbed keys, stopping iteration")
  end
until cursor == "0"


log_debug("END upgrading from: ", old_version, "to:", new_version)

return
-------- END upgrade.lua --------