sidekiq-unique-jobs 7.1.8 → 8.0.2

Sign up to get free protection for your applications and to access all the features.

Potentially problematic release.


This version of sidekiq-unique-jobs might be problematic. Click here for more details.

Files changed (72) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +315 -2
  3. data/README.md +17 -14
  4. data/lib/sidekiq_unique_jobs/batch_delete.rb +7 -10
  5. data/lib/sidekiq_unique_jobs/changelog.rb +7 -17
  6. data/lib/sidekiq_unique_jobs/cli.rb +33 -8
  7. data/lib/sidekiq_unique_jobs/config.rb +5 -0
  8. data/lib/sidekiq_unique_jobs/connection.rb +4 -7
  9. data/lib/sidekiq_unique_jobs/constants.rb +1 -0
  10. data/lib/sidekiq_unique_jobs/core_ext.rb +1 -1
  11. data/lib/sidekiq_unique_jobs/digests.rb +7 -17
  12. data/lib/sidekiq_unique_jobs/exceptions.rb +3 -3
  13. data/lib/sidekiq_unique_jobs/expiring_digests.rb +14 -0
  14. data/lib/sidekiq_unique_jobs/key.rb +13 -8
  15. data/lib/sidekiq_unique_jobs/lock/until_executed.rb +5 -1
  16. data/lib/sidekiq_unique_jobs/lock/until_executing.rb +4 -0
  17. data/lib/sidekiq_unique_jobs/lock/until_expired.rb +3 -1
  18. data/lib/sidekiq_unique_jobs/lock/while_executing.rb +4 -1
  19. data/lib/sidekiq_unique_jobs/lock.rb +32 -12
  20. data/lib/sidekiq_unique_jobs/lock_args.rb +19 -15
  21. data/lib/sidekiq_unique_jobs/lock_config.rb +6 -6
  22. data/lib/sidekiq_unique_jobs/lock_digest.rb +7 -7
  23. data/lib/sidekiq_unique_jobs/lock_info.rb +2 -2
  24. data/lib/sidekiq_unique_jobs/lock_timeout.rb +4 -4
  25. data/lib/sidekiq_unique_jobs/lock_ttl.rb +4 -4
  26. data/lib/sidekiq_unique_jobs/locksmith.rb +31 -11
  27. data/lib/sidekiq_unique_jobs/logging.rb +14 -0
  28. data/lib/sidekiq_unique_jobs/lua/delete.lua +3 -6
  29. data/lib/sidekiq_unique_jobs/lua/delete_by_digest.lua +3 -6
  30. data/lib/sidekiq_unique_jobs/lua/delete_job_by_digest.lua +1 -1
  31. data/lib/sidekiq_unique_jobs/lua/find_digest_in_queues.lua +1 -1
  32. data/lib/sidekiq_unique_jobs/lua/lock.lua +16 -10
  33. data/lib/sidekiq_unique_jobs/lua/lock_until_expired.lua +92 -0
  34. data/lib/sidekiq_unique_jobs/lua/locked.lua +1 -1
  35. data/lib/sidekiq_unique_jobs/lua/queue.lua +1 -1
  36. data/lib/sidekiq_unique_jobs/lua/reap_orphans.lua +33 -8
  37. data/lib/sidekiq_unique_jobs/lua/shared/_common.lua +1 -6
  38. data/lib/sidekiq_unique_jobs/lua/shared/_delete_from_sorted_set.lua +1 -0
  39. data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_process_set.lua +1 -1
  40. data/lib/sidekiq_unique_jobs/lua/unlock.lua +16 -15
  41. data/lib/sidekiq_unique_jobs/lua/update_version.lua +1 -1
  42. data/lib/sidekiq_unique_jobs/lua/upgrade.lua +1 -3
  43. data/lib/sidekiq_unique_jobs/middleware/client.rb +2 -0
  44. data/lib/sidekiq_unique_jobs/middleware/server.rb +2 -0
  45. data/lib/sidekiq_unique_jobs/middleware.rb +4 -4
  46. data/lib/sidekiq_unique_jobs/on_conflict/reject.rb +0 -43
  47. data/lib/sidekiq_unique_jobs/on_conflict/reschedule.rb +3 -3
  48. data/lib/sidekiq_unique_jobs/options_with_fallback.rb +3 -3
  49. data/lib/sidekiq_unique_jobs/orphans/lua_reaper.rb +1 -1
  50. data/lib/sidekiq_unique_jobs/orphans/manager.rb +4 -11
  51. data/lib/sidekiq_unique_jobs/orphans/ruby_reaper.rb +95 -16
  52. data/lib/sidekiq_unique_jobs/redis/sorted_set.rb +9 -2
  53. data/lib/sidekiq_unique_jobs/redis/string.rb +3 -1
  54. data/lib/sidekiq_unique_jobs/reflections.rb +1 -1
  55. data/lib/sidekiq_unique_jobs/script/caller.rb +14 -8
  56. data/lib/sidekiq_unique_jobs/server.rb +0 -1
  57. data/lib/sidekiq_unique_jobs/sidekiq_unique_ext.rb +1 -1
  58. data/lib/sidekiq_unique_jobs/sidekiq_unique_jobs.rb +11 -1
  59. data/lib/sidekiq_unique_jobs/sidekiq_worker_methods.rb +40 -21
  60. data/lib/sidekiq_unique_jobs/testing.rb +53 -21
  61. data/lib/sidekiq_unique_jobs/timer_task.rb +266 -45
  62. data/lib/sidekiq_unique_jobs/timing.rb +1 -1
  63. data/lib/sidekiq_unique_jobs/upgrade_locks.rb +11 -14
  64. data/lib/sidekiq_unique_jobs/version.rb +1 -1
  65. data/lib/sidekiq_unique_jobs/web/helpers.rb +15 -3
  66. data/lib/sidekiq_unique_jobs/web/views/changelogs.erb +44 -38
  67. data/lib/sidekiq_unique_jobs/web/views/lock.erb +5 -3
  68. data/lib/sidekiq_unique_jobs/web/views/locks.erb +42 -37
  69. data/lib/sidekiq_unique_jobs/web.rb +26 -8
  70. data/lib/sidekiq_unique_jobs.rb +1 -0
  71. data/lib/tasks/changelog.rake +1 -1
  72. metadata +15 -43
@@ -30,6 +30,8 @@ module SidekiqUniqueJobs
30
30
  # @yield [String, Exception] the message or exception to use for log message
31
31
  #
32
32
  def log_debug(message_or_exception = nil, item = nil, &block)
33
+ return unless logging?
34
+
33
35
  message = build_message(message_or_exception, item)
34
36
  logger.debug(message, &block)
35
37
  nil
@@ -45,6 +47,8 @@ module SidekiqUniqueJobs
45
47
  # @yield [String, Exception] the message or exception to use for log message
46
48
  #
47
49
  def log_info(message_or_exception = nil, item = nil, &block)
50
+ return unless logging?
51
+
48
52
  message = build_message(message_or_exception, item)
49
53
  logger.info(message, &block)
50
54
  nil
@@ -60,6 +64,8 @@ module SidekiqUniqueJobs
60
64
  # @yield [String, Exception] the message or exception to use for log message
61
65
  #
62
66
  def log_warn(message_or_exception = nil, item = nil, &block)
67
+ return unless logging?
68
+
63
69
  message = build_message(message_or_exception, item)
64
70
  logger.warn(message, &block)
65
71
  nil
@@ -75,6 +81,8 @@ module SidekiqUniqueJobs
75
81
  # @yield [String, Exception] the message or exception to use for log message
76
82
  #
77
83
  def log_error(message_or_exception = nil, item = nil, &block)
84
+ return unless logging?
85
+
78
86
  message = build_message(message_or_exception, item)
79
87
  logger.error(message, &block)
80
88
  nil
@@ -90,6 +98,8 @@ module SidekiqUniqueJobs
90
98
  # @yield [String, Exception] the message or exception to use for log message
91
99
  #
92
100
  def log_fatal(message_or_exception = nil, item = nil, &block)
101
+ return unless logging?
102
+
93
103
  message = build_message(message_or_exception, item)
94
104
  logger.fatal(message, &block)
95
105
 
@@ -218,5 +228,9 @@ module SidekiqUniqueJobs
218
228
 
219
229
  yield
220
230
  end
231
+
232
+ def logging?
233
+ SidekiqUniqueJobs.logging?
234
+ end
221
235
  end
222
236
  end
@@ -17,7 +17,7 @@ local limit = tonumber(ARGV[4])
17
17
 
18
18
  -------- BEGIN injected arguments --------
19
19
  local current_time = tonumber(ARGV[5])
20
- local debug_lua = tostring(ARGV[6]) == "true"
20
+ local debug_lua = tostring(ARGV[6]) == "1"
21
21
  local max_history = tonumber(ARGV[7])
22
22
  local script_name = tostring(ARGV[8]) .. ".lua"
23
23
  local redisversion = tostring(ARGV[9])
@@ -33,15 +33,12 @@ log_debug("BEGIN delete", digest)
33
33
 
34
34
  local redis_version = toversion(redisversion)
35
35
  local count = 0
36
- local del_cmd = "DEL"
37
36
 
38
37
  log_debug("ZREM", digests, digest)
39
38
  count = count + redis.call("ZREM", digests, digest)
40
39
 
41
- if redis_version["major"] >= 4 then del_cmd = "UNLINK"; end
42
-
43
- log_debug(del_cmd, digest, queued, primed, locked, info)
44
- count = count + redis.call(del_cmd, digest, queued, primed, locked, info)
40
+ log_debug("UNLINK", digest, queued, primed, locked, info)
41
+ count = count + redis.call("UNLINK", digest, queued, primed, locked, info)
45
42
 
46
43
 
47
44
  log("Deleted (" .. count .. ") keys")
@@ -12,7 +12,7 @@ local digests = KEYS[9]
12
12
 
13
13
  -------- BEGIN injected arguments --------
14
14
  local current_time = tonumber(ARGV[1])
15
- local debug_lua = ARGV[2] == "true"
15
+ local debug_lua = tostring(ARGV[2]) == "1"
16
16
  local max_history = tonumber(ARGV[3])
17
17
  local script_name = tostring(ARGV[4]) .. ".lua"
18
18
  local redisversion = tostring(ARGV[5])
@@ -25,14 +25,11 @@ local redisversion = tostring(ARGV[5])
25
25
  -------- BEGIN delete_by_digest.lua --------
26
26
  local counter = 0
27
27
  local redis_version = toversion(redisversion)
28
- local del_cmd = "DEL"
29
28
 
30
29
  log_debug("BEGIN delete_by_digest:", digest)
31
30
 
32
- if redis_version["major"] >= 4 then del_cmd = "UNLINK"; end
33
-
34
- log_debug(del_cmd, digest, queued, primed, locked, run_digest, run_queued, run_primed, run_locked)
35
- counter = redis.call(del_cmd, digest, queued, primed, locked, run_digest, run_queued, run_primed, run_locked)
31
+ log_debug("UNLINK", digest, queued, primed, locked, run_digest, run_queued, run_primed, run_locked)
32
+ counter = redis.call("UNLINK", digest, queued, primed, locked, run_digest, run_queued, run_primed, run_locked)
36
33
 
37
34
  log_debug("ZREM", digests, digest)
38
35
  redis.call("ZREM", digests, digest)
@@ -10,7 +10,7 @@ local digest = ARGV[1]
10
10
 
11
11
  -------- BEGIN injected arguments --------
12
12
  local current_time = tonumber(ARGV[2])
13
- local debug_lua = ARGV[3] == "true"
13
+ local debug_lua = tostring(ARGV[3]) == "1"
14
14
  local max_history = tonumber(ARGV[4])
15
15
  local script_name = tostring(ARGV[5]) .. ".lua"
16
16
  --------- END injected arguments ---------
@@ -4,7 +4,7 @@ local digest = KEYS[1]
4
4
 
5
5
  -------- BEGIN injected arguments --------
6
6
  local current_time = tonumber(ARGV[2])
7
- local debug_lua = ARGV[3] == "true"
7
+ local debug_lua = tostring(ARGV[3]) == "1"
8
8
  local max_history = tonumber(ARGV[4])
9
9
  local script_name = tostring(ARGV[5]) .. ".lua"
10
10
  --------- END injected arguments ---------
@@ -1,11 +1,12 @@
1
1
  -------- BEGIN keys ---------
2
- local digest = KEYS[1]
3
- local queued = KEYS[2]
4
- local primed = KEYS[3]
5
- local locked = KEYS[4]
6
- local info = KEYS[5]
7
- local changelog = KEYS[6]
8
- local digests = KEYS[7]
2
+ local digest = KEYS[1]
3
+ local queued = KEYS[2]
4
+ local primed = KEYS[3]
5
+ local locked = KEYS[4]
6
+ local info = KEYS[5]
7
+ local changelog = KEYS[6]
8
+ local digests = KEYS[7]
9
+ local expiring_digests = KEYS[8]
9
10
  -------- END keys ---------
10
11
 
11
12
 
@@ -19,7 +20,7 @@ local limit = tonumber(ARGV[4])
19
20
 
20
21
  -------- BEGIN injected arguments --------
21
22
  local current_time = tonumber(ARGV[5])
22
- local debug_lua = ARGV[6] == "true"
23
+ local debug_lua = tostring(ARGV[6]) == "1"
23
24
  local max_history = tonumber(ARGV[7])
24
25
  local script_name = tostring(ARGV[8]) .. ".lua"
25
26
  local redisversion = ARGV[9]
@@ -57,8 +58,13 @@ if limit_exceeded then
57
58
  return nil
58
59
  end
59
60
 
60
- log_debug("ZADD", digests, current_time, digest)
61
- redis.call("ZADD", digests, current_time, digest)
61
+ if lock_type == "until_expired" and pttl and pttl > 0 then
62
+ log_debug("ZADD", expiring_digests, current_time + pttl, digest)
63
+ redis.call("ZADD", expiring_digests, current_time + pttl, digest)
64
+ else
65
+ log_debug("ZADD", digests, current_time, digest)
66
+ redis.call("ZADD", digests, current_time, digest)
67
+ end
62
68
 
63
69
  log_debug("HSET", locked, job_id, current_time)
64
70
  redis.call("HSET", locked, job_id, current_time)
@@ -0,0 +1,92 @@
1
+ -------- BEGIN keys ---------
2
+ local digest = KEYS[1]
3
+ local queued = KEYS[2]
4
+ local primed = KEYS[3]
5
+ local locked = KEYS[4]
6
+ local info = KEYS[5]
7
+ local changelog = KEYS[6]
8
+ local digests = KEYS[7]
9
+ local expiring_digests = KEYS[8]
10
+ -------- END keys ---------
11
+
12
+
13
+ -------- BEGIN lock arguments ---------
14
+ local job_id = ARGV[1]
15
+ local pttl = tonumber(ARGV[2])
16
+ local lock_type = ARGV[3]
17
+ local limit = tonumber(ARGV[4])
18
+ -------- END lock arguments -----------
19
+
20
+
21
+ -------- BEGIN injected arguments --------
22
+ local current_time = tonumber(ARGV[5])
23
+ local debug_lua = tostring(ARGV[6]) == "1"
24
+ local max_history = tonumber(ARGV[7])
25
+ local script_name = tostring(ARGV[8]) .. ".lua"
26
+ local redisversion = ARGV[9]
27
+ --------- END injected arguments ---------
28
+
29
+
30
+ -------- BEGIN local functions --------
31
+ <%= include_partial "shared/_common.lua" %>
32
+ ---------- END local functions ----------
33
+
34
+
35
+ --------- BEGIN lock.lua ---------
36
+ log_debug("BEGIN lock digest:", digest, "job_id:", job_id)
37
+
38
+ if redis.call("HEXISTS", locked, job_id) == 1 then
39
+ log_debug(locked, "already locked with job_id:", job_id)
40
+ log("Duplicate")
41
+
42
+ log_debug("LREM", queued, -1, job_id)
43
+ redis.call("LREM", queued, -1, job_id)
44
+
45
+ log_debug("LREM", primed, 1, job_id)
46
+ redis.call("LREM", primed, 1, job_id)
47
+
48
+ return job_id
49
+ end
50
+
51
+ local locked_count = redis.call("HLEN", locked)
52
+ local within_limit = limit > locked_count
53
+ local limit_exceeded = not within_limit
54
+
55
+ if limit_exceeded then
56
+ log_debug("Limit exceeded:", digest, "(", locked_count, "of", limit, ")")
57
+ log("Limited")
58
+ return nil
59
+ end
60
+
61
+ log_debug("ZADD", expiring_digests, current_time + pttl, digest)
62
+ redis.call("ZADD", expiring_digests, current_time + pttl, digest)
63
+
64
+ log_debug("HSET", locked, job_id, current_time)
65
+ redis.call("HSET", locked, job_id, current_time)
66
+
67
+ log_debug("LREM", queued, -1, job_id)
68
+ redis.call("LREM", queued, -1, job_id)
69
+
70
+ log_debug("LREM", primed, 1, job_id)
71
+ redis.call("LREM", primed, 1, job_id)
72
+
73
+ -- The Sidekiq client sets pttl
74
+ log_debug("PEXPIRE", digest, pttl)
75
+ redis.call("PEXPIRE", digest, pttl)
76
+
77
+ log_debug("PEXPIRE", locked, pttl)
78
+ redis.call("PEXPIRE", locked, pttl)
79
+
80
+ log_debug("PEXPIRE", info, pttl)
81
+ redis.call("PEXPIRE", info, pttl)
82
+
83
+ log_debug("PEXPIRE", queued, 1000)
84
+ redis.call("PEXPIRE", queued, 1000)
85
+
86
+ log_debug("PEXPIRE", primed, 1000)
87
+ redis.call("PEXPIRE", primed, 1000)
88
+
89
+ log("Locked")
90
+ log_debug("END lock digest:", digest, "job_id:", job_id)
91
+ return job_id
92
+ ---------- END lock.lua ----------
@@ -14,7 +14,7 @@ local job_id = ARGV[1]
14
14
 
15
15
  -------- BEGIN injected arguments --------
16
16
  local current_time = tonumber(ARGV[2])
17
- local debug_lua = ARGV[3] == "true"
17
+ local debug_lua = tostring(ARGV[3]) == "1"
18
18
  local max_history = tonumber(ARGV[4])
19
19
  local script_name = tostring(ARGV[5]) .. ".lua"
20
20
  --------- END injected arguments ---------
@@ -19,7 +19,7 @@ local limit = tonumber(ARGV[4])
19
19
 
20
20
  -------- BEGIN injected arguments --------
21
21
  local current_time = tonumber(ARGV[5])
22
- local debug_lua = ARGV[6] == "true"
22
+ local debug_lua = tostring(ARGV[6]) == "1"
23
23
  local max_history = tonumber(ARGV[7])
24
24
  local script_name = tostring(ARGV[8]) .. ".lua"
25
25
  --------- END injected arguments ---------
@@ -1,9 +1,10 @@
1
1
  redis.replicate_commands()
2
2
 
3
3
  -------- BEGIN keys ---------
4
- local digests_set = KEYS[1]
5
- local schedule_set = KEYS[2]
6
- local retry_set = KEYS[3]
4
+ local digests_set = KEYS[1]
5
+ local expiring_digests_set = KEYS[2]
6
+ local schedule_set = KEYS[3]
7
+ local retry_set = KEYS[4]
7
8
  -------- END keys ---------
8
9
 
9
10
  -------- BEGIN argv ---------
@@ -13,7 +14,7 @@ local threshold = tonumber(ARGV[2])
13
14
 
14
15
  -------- BEGIN injected arguments --------
15
16
  local current_time = tonumber(ARGV[3])
16
- local debug_lua = ARGV[4] == "true"
17
+ local debug_lua = tostring(ARGV[4]) == "1"
17
18
  local max_history = tonumber(ARGV[5])
18
19
  local script_name = ARGV[6] .. ".lua"
19
20
  local redisversion = ARGV[7]
@@ -36,9 +37,6 @@ local total = redis.call("ZCARD", digests_set)
36
37
  local index = 0
37
38
  local del_count = 0
38
39
  local redis_ver = toversion(redisversion)
39
- local del_cmd = "DEL"
40
-
41
- if tonumber(redis_ver["major"]) >= 4 then del_cmd = "UNLINK"; end
42
40
 
43
41
  repeat
44
42
  log_debug("Interating through:", digests_set, "for orphaned locks")
@@ -80,7 +78,7 @@ repeat
80
78
  local run_locked = digest .. ":RUN:LOCKED"
81
79
  local run_info = digest .. ":RUN:INFO"
82
80
 
83
- redis.call(del_cmd, digest, queued, primed, locked, info, run_digest, run_queued, run_primed, run_locked, run_info)
81
+ redis.call("UNLINK", digest, queued, primed, locked, info, run_digest, run_queued, run_primed, run_locked, run_info)
84
82
 
85
83
  redis.call("ZREM", digests_set, digest)
86
84
  del_count = del_count + 1
@@ -90,5 +88,32 @@ repeat
90
88
  index = index + per
91
89
  until index >= total or del_count >= reaper_count
92
90
 
91
+ if del_count < reaper_count then
92
+ index = 0
93
+ total = redis.call("ZCOUNT", expiring_digests_set, 0, current_time)
94
+ repeat
95
+ local digests = redis.call("ZRANGEBYSCORE", expiring_digests_set, 0, current_time, "LIMIT", index, index + per -1)
96
+
97
+ for _, digest in pairs(digests) do
98
+ local queued = digest .. ":QUEUED"
99
+ local primed = digest .. ":PRIMED"
100
+ local locked = digest .. ":LOCKED"
101
+ local info = digest .. ":INFO"
102
+ local run_digest = digest .. ":RUN"
103
+ local run_queued = digest .. ":RUN:QUEUED"
104
+ local run_primed = digest .. ":RUN:PRIMED"
105
+ local run_locked = digest .. ":RUN:LOCKED"
106
+ local run_info = digest .. ":RUN:INFO"
107
+
108
+ redis.call("UNLINK", digest, queued, primed, locked, info, run_digest, run_queued, run_primed, run_locked, run_info)
109
+
110
+ redis.call("ZREM", expiring_digests_set, digest)
111
+ del_count = del_count + 1
112
+ end
113
+
114
+ index = index + per
115
+ until index >= total or del_count >= reaper_count
116
+ end
117
+
93
118
  log_debug("END")
94
119
  return del_count
@@ -9,11 +9,6 @@ local function toversion(version)
9
9
  }
10
10
  end
11
11
 
12
- local function toboolean(val)
13
- val = tostring(val)
14
- return val == "1" or val == "true"
15
- end
16
-
17
12
  local function log_debug( ... )
18
13
  if debug_lua ~= true then return end
19
14
 
@@ -31,7 +26,7 @@ local function log(message, prev_jid)
31
26
  log_debug("ZADD", changelog, current_time, entry);
32
27
  redis.call("ZADD", changelog, current_time, entry);
33
28
  local total_entries = redis.call("ZCARD", changelog)
34
- local removed_entries = redis.call("ZREMRANGEBYRANK", changelog, max_history, -1)
29
+ local removed_entries = redis.call("ZREMRANGEBYRANK", changelog, 0, -1 * max_history)
35
30
  if removed_entries > 0 then
36
31
  log_debug("Removing", removed_entries , "entries from changelog (total entries", total_entries, "exceeds max_history:", max_history ..")");
37
32
  end
@@ -3,6 +3,7 @@ local function delete_from_sorted_set(name, digest)
3
3
  local total = redis.call("zcard", name)
4
4
  local index = 0
5
5
  local result
6
+
6
7
  while (index < total) do
7
8
  local items = redis.call("ZRANGE", name, index, index + per -1)
8
9
  for _, item in pairs(items) do
@@ -15,7 +15,7 @@ local function find_digest_in_process_set(digest, threshold)
15
15
  log_debug("Found number of processes:", #processes, "next cursor:", next_process_cursor)
16
16
 
17
17
  for _, process in ipairs(processes) do
18
- local workers_key = process .. ":workers"
18
+ local workers_key = process .. ":work"
19
19
  log_debug("searching in process set:", process,
20
20
  "for digest:", digest,
21
21
  "cursor:", process_cursor)
@@ -19,7 +19,7 @@ local limit = tonumber(ARGV[4])
19
19
 
20
20
  -------- BEGIN injected arguments --------
21
21
  local current_time = tonumber(ARGV[5])
22
- local debug_lua = ARGV[6] == "true"
22
+ local debug_lua = tostring(ARGV[6]) == "1"
23
23
  local max_history = tonumber(ARGV[7])
24
24
  local script_name = tostring(ARGV[8]) .. ".lua"
25
25
  local redisversion = ARGV[9]
@@ -65,20 +65,11 @@ redis.call("LREM", queued, -1, job_id)
65
65
  log_debug("LREM", primed, -1, job_id)
66
66
  redis.call("LREM", primed, -1, job_id)
67
67
 
68
- if limit and limit <= 1 and locked_count and locked_count <= 1 then
69
- log_debug("ZREM", digests, digest)
70
- redis.call("ZREM", digests, digest)
71
- end
72
-
73
68
  local redis_version = toversion(redisversion)
74
- local del_cmd = "DEL"
75
-
76
- if tonumber(redis_version["major"]) >= 4 then del_cmd = "UNLINK"; end
77
69
 
78
70
  if lock_type ~= "until_expired" then
79
-
80
- log_debug(del_cmd, digest, info)
81
- redis.call(del_cmd, digest, info)
71
+ log_debug("UNLINK", digest, info)
72
+ redis.call("UNLINK", digest, info)
82
73
 
83
74
  log_debug("HDEL", locked, job_id)
84
75
  redis.call("HDEL", locked, job_id)
@@ -86,9 +77,19 @@ end
86
77
 
87
78
  local locked_count = redis.call("HLEN", locked)
88
79
 
89
- if tonumber(locked_count) < 1 then
90
- log_debug(del_cmd, locked)
91
- redis.call(del_cmd, locked)
80
+ if locked_count and locked_count < 1 then
81
+ log_debug("UNLINK", locked)
82
+ redis.call("UNLINK", locked)
83
+ end
84
+
85
+ if redis.call("LLEN", primed) == 0 then
86
+ log_debug("UNLINK", primed)
87
+ redis.call("UNLINK", primed)
88
+ end
89
+
90
+ if limit and limit <= 1 and locked_count and locked_count <= 1 then
91
+ log_debug("ZREM", digests, digest)
92
+ redis.call("ZREM", digests, digest)
92
93
  end
93
94
 
94
95
  log_debug("LPUSH", queued, "1")
@@ -9,7 +9,7 @@ local version = ARGV[1]
9
9
 
10
10
  -------- BEGIN injected arguments --------
11
11
  local current_time = tonumber(ARGV[2])
12
- local debug_lua = ARGV[3] == "true"
12
+ local debug_lua = tostring(ARGV[3]) == "1"
13
13
  local max_history = tonumber(ARGV[4])
14
14
  local script_name = tostring(ARGV[5]) .. ".lua"
15
15
  --------- END injected arguments ---------
@@ -6,7 +6,7 @@ local dead_version = KEYS[2]
6
6
 
7
7
  -------- BEGIN injected arguments --------
8
8
  local current_time = tonumber(ARGV[5])
9
- local debug_lua = ARGV[6] == "true"
9
+ local debug_lua = tostring(ARGV[6]) == "1"
10
10
  local max_history = tonumber(ARGV[7])
11
11
  local script_name = tostring(ARGV[8]) .. ".lua"
12
12
  local redisversion = ARGV[9]
@@ -22,9 +22,7 @@ local new_version = redis.call("GET", live_version)
22
22
  local old_version = redis.call("GET", dead_version)
23
23
  local redis_version = toversion(redisversion)
24
24
  local upgraded = 0
25
- local del_cmd = "DEL"
26
25
 
27
- if redis_version["major"] >= 4 then del_cmd = "UNLINK"; end
28
26
  -------- BEGIN delete.lua --------
29
27
 
30
28
  log_debug("BEGIN upgrading from: ", old_version, "to:", new_version)
@@ -6,6 +6,8 @@ module SidekiqUniqueJobs
6
6
  #
7
7
  # @author Mikael Henriksson <mikael@mhenrixon.com>
8
8
  class Client
9
+ include Sidekiq::ClientMiddleware if defined?(Sidekiq::ClientMiddleware)
10
+
9
11
  # prepend "SidekiqUniqueJobs::Middleware"
10
12
  # @!parse prepends SidekiqUniqueJobs::Middleware
11
13
  prepend SidekiqUniqueJobs::Middleware
@@ -6,6 +6,8 @@ module SidekiqUniqueJobs
6
6
  #
7
7
  # @author Mikael Henriksson <mikael@mhenrixon.com>
8
8
  class Server
9
+ include Sidekiq::ServerMiddleware if defined?(Sidekiq::ServerMiddleware)
10
+
9
11
  # prepend "SidekiqUniqueJobs::Middleware"
10
12
  # @!parse prepends SidekiqUniqueJobs::Middleware
11
13
  prepend SidekiqUniqueJobs::Middleware
@@ -29,10 +29,10 @@ module SidekiqUniqueJobs
29
29
  # @yieldparam [<type>] if <description>
30
30
  # @yieldreturn [<type>] <describe what yield should return>
31
31
  def call(worker_class, item, queue, redis_pool = nil)
32
- @worker_class = worker_class
33
- @item = item
34
- @queue = queue
35
- @redis_pool = redis_pool
32
+ @item = item
33
+ @queue = queue
34
+ @redis_pool = redis_pool
35
+ self.job_class = worker_class
36
36
  return yield if unique_disabled?
37
37
 
38
38
  SidekiqUniqueJobs::Job.prepare(item) unless item[LOCK_DIGEST]
@@ -12,33 +12,6 @@ module SidekiqUniqueJobs
12
12
  def call
13
13
  log_info { "Adding dead #{item[CLASS]} job #{item[JID]}" }
14
14
 
15
- if deadset_kill?
16
- deadset_kill
17
- else
18
- push_to_deadset
19
- end
20
- end
21
-
22
- #
23
- # Sidekiq version compatibility check
24
- # @api private
25
- #
26
- #
27
- # @return [true, false] depending on if Sidekiq::Deadset responds to kill
28
- #
29
- def deadset_kill?
30
- deadset.respond_to?(:kill)
31
- end
32
-
33
- #
34
- # Use Sidekiqs built in Sidekiq::DeadSet#kill
35
- # to get rid of the job
36
- # @api private
37
- #
38
- #
39
- # @return [void]
40
- #
41
- def deadset_kill
42
15
  if kill_with_options?
43
16
  kill_job_with_options
44
17
  else
@@ -88,22 +61,6 @@ module SidekiqUniqueJobs
88
61
  @deadset ||= Sidekiq::DeadSet.new
89
62
  end
90
63
 
91
- #
92
- # Used for compatibility with older Sidekiq versions
93
- #
94
- #
95
- # @return [void]
96
- #
97
- def push_to_deadset
98
- redis do |conn|
99
- conn.multi do
100
- conn.zadd("dead", now_f, payload)
101
- conn.zremrangebyscore("dead", "-inf", now_f - Sidekiq::DeadSet.timeout)
102
- conn.zremrangebyrank("dead", 0, -Sidekiq::DeadSet.max_jobs)
103
- end
104
- end
105
- end
106
-
107
64
  #
108
65
  # The Sidekiq job hash as JSON
109
66
  #
@@ -14,14 +14,14 @@ module SidekiqUniqueJobs
14
14
  # @param [Hash] item sidekiq job hash
15
15
  def initialize(item, redis_pool = nil)
16
16
  super(item, redis_pool)
17
- @worker_class = item[CLASS]
17
+ self.job_class = item[CLASS]
18
18
  end
19
19
 
20
20
  # Create a new job from the current one.
21
21
  # This will mess up sidekiq stats because a new job is created
22
22
  def call
23
- if sidekiq_worker_class?
24
- if worker_class.perform_in(5, *item[ARGS])
23
+ if sidekiq_job_class?
24
+ if job_class.set(queue: item["queue"].to_sym).perform_in(5, *item[ARGS])
25
25
  reflect(:rescheduled, item)
26
26
  else
27
27
  reflect(:reschedule_failed, item)
@@ -6,7 +6,7 @@ module SidekiqUniqueJobs
6
6
  # Requires the following methods to be defined in the including class
7
7
  # 1. item (required)
8
8
  # 2. options (can be nil)
9
- # 3. worker_class (required, can be anything)
9
+ # 3. job_class (required, can be anything)
10
10
  # @author Mikael Henriksson <mikael@mhenrixon.com>
11
11
  module OptionsWithFallback
12
12
  def self.included(base)
@@ -69,8 +69,8 @@ module SidekiqUniqueJobs
69
69
  #
70
70
  def options
71
71
  @options ||= begin
72
- opts = default_worker_options.dup
73
- opts.merge!(worker_options) if sidekiq_worker_class?
72
+ opts = default_job_options.dup
73
+ opts.merge!(job_options) if sidekiq_job_class?
74
74
  (opts || {}).stringify_keys
75
75
  end
76
76
  end
@@ -20,7 +20,7 @@ module SidekiqUniqueJobs
20
20
  call_script(
21
21
  :reap_orphans,
22
22
  conn,
23
- keys: [DIGESTS, SCHEDULE, RETRY, PROCESSES],
23
+ keys: [DIGESTS, EXPIRING_DIGESTS, SCHEDULE, RETRY, PROCESSES],
24
24
  argv: [reaper_count, (Time.now - reaper_timeout).to_f],
25
25
  )
26
26
  end
@@ -1,5 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require "concurrent/version"
4
+
3
5
  module SidekiqUniqueJobs
4
6
  module Orphans
5
7
  #
@@ -108,9 +110,7 @@ module SidekiqUniqueJobs
108
110
  # @return [Hash]
109
111
  #
110
112
  def timer_task_options
111
- { run_now: true,
112
- execution_interval: reaper_interval,
113
- timeout_interval: reaper_timeout }
113
+ { run_now: true, execution_interval: reaper_interval }
114
114
  end
115
115
 
116
116
  #
@@ -127,13 +127,6 @@ module SidekiqUniqueJobs
127
127
  SidekiqUniqueJobs.config.reaper_interval
128
128
  end
129
129
 
130
- #
131
- # @see SidekiqUniqueJobs::Config#reaper_timeout
132
- #
133
- def reaper_timeout
134
- SidekiqUniqueJobs.config.reaper_timeout
135
- end
136
-
137
130
  #
138
131
  # A context to use for all log entries
139
132
  #
@@ -224,7 +217,7 @@ module SidekiqUniqueJobs
224
217
 
225
218
  #
226
219
  # Reaper interval with a little drift
227
- # Redis isn't exact enough so to give a little bufffer,
220
+ # Redis isn't exact enough so to give a little buffer,
228
221
  # we add a tiny value to the reaper interval.
229
222
  #
230
223
  #