sidekiq-unique-jobs 8.0.10 → 8.0.12
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +54 -6
- data/README.md +62 -49
- data/lib/sidekiq_unique_jobs/cli.rb +2 -2
- data/lib/sidekiq_unique_jobs/config.rb +65 -33
- data/lib/sidekiq_unique_jobs/digests.rb +1 -1
- data/lib/sidekiq_unique_jobs/exceptions.rb +2 -2
- data/lib/sidekiq_unique_jobs/job.rb +1 -1
- data/lib/sidekiq_unique_jobs/lock/base_lock.rb +8 -4
- data/lib/sidekiq_unique_jobs/lock/until_and_while_executing.rb +7 -4
- data/lib/sidekiq_unique_jobs/lock/until_executing.rb +1 -1
- data/lib/sidekiq_unique_jobs/lock/while_executing.rb +1 -1
- data/lib/sidekiq_unique_jobs/lock.rb +1 -1
- data/lib/sidekiq_unique_jobs/lock_args.rb +3 -3
- data/lib/sidekiq_unique_jobs/lock_digest.rb +6 -1
- data/lib/sidekiq_unique_jobs/lock_ttl.rb +34 -8
- data/lib/sidekiq_unique_jobs/locksmith.rb +25 -7
- data/lib/sidekiq_unique_jobs/logging.rb +2 -2
- data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_process_set.lua +8 -3
- data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_queues.lua +11 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_sorted_set.lua +5 -1
- data/lib/sidekiq_unique_jobs/lua/unlock.lua +20 -12
- data/lib/sidekiq_unique_jobs/on_conflict/reject.rb +10 -1
- data/lib/sidekiq_unique_jobs/on_conflict/replace.rb +3 -3
- data/lib/sidekiq_unique_jobs/on_conflict/reschedule.rb +1 -1
- data/lib/sidekiq_unique_jobs/on_conflict.rb +2 -2
- data/lib/sidekiq_unique_jobs/orphans/manager.rb +3 -3
- data/lib/sidekiq_unique_jobs/orphans/ruby_reaper.rb +36 -9
- data/lib/sidekiq_unique_jobs/reflections.rb +3 -3
- data/lib/sidekiq_unique_jobs/rspec/matchers/have_valid_sidekiq_options.rb +3 -1
- data/lib/sidekiq_unique_jobs/script/client.rb +11 -3
- data/lib/sidekiq_unique_jobs/script/lua_error.rb +2 -0
- data/lib/sidekiq_unique_jobs/script/scripts.rb +42 -46
- data/lib/sidekiq_unique_jobs/sidekiq_unique_ext.rb +2 -2
- data/lib/sidekiq_unique_jobs/sidekiq_unique_jobs.rb +4 -4
- data/lib/sidekiq_unique_jobs/sidekiq_worker_methods.rb +1 -1
- data/lib/sidekiq_unique_jobs/testing.rb +2 -2
- data/lib/sidekiq_unique_jobs/version.rb +1 -1
- data/lib/sidekiq_unique_jobs/web/helpers.rb +29 -1
- data/lib/sidekiq_unique_jobs/web.rb +38 -30
- metadata +5 -8
data/lib/sidekiq_unique_jobs/lock/base_lock.rb:

@@ -134,9 +134,13 @@ module SidekiqUniqueJobs
       def callback_safely
         callback&.call
         item[JID]
-      rescue StandardError
-        reflect(:after_unlock_callback_failed, item)
-        raise
+      rescue StandardError => ex
+        reflect(:after_unlock_callback_failed, item, ex)
+        # Don't re-raise: lock is already unlocked, can't rollback
+        # Re-raising would cause job retry with lock already released
+        # leading to potential double execution
+        log_warn("After unlock callback failed: #{ex.class} - #{ex.message}")
+        item[JID]
       end

       def strategy_for(origin)
@@ -147,7 +151,7 @@ module SidekiqUniqueJobs
           server_strategy
         else
           raise SidekiqUniqueJobs::InvalidArgument,
-
+                "#origin needs to be either `:server` or `:client`"
         end
       end

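The behavioral change above means an exception raised by an after-unlock callback no longer fails (and retries) a job whose lock is already gone; it is only reflected and logged. A minimal sketch of observing these failures through the gem's reflection API — the reflection name and `(item, ex)` signature come from the diff above, the handler body is illustrative:

```ruby
require "sidekiq_unique_jobs"

SidekiqUniqueJobs.reflect do |on|
  # As of 8.0.12 the reflection receives the exception alongside the job hash
  on.after_unlock_callback_failed do |item, ex|
    Sidekiq.logger.warn("after_unlock raised #{ex.class}: #{ex.message} (jid=#{item['jid']})")
  end
end
```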
data/lib/sidekiq_unique_jobs/lock/until_and_while_executing.rb:

@@ -45,9 +45,11 @@ module SidekiqUniqueJobs
         else
           reflect(:unlock_failed, item)
         end
-      rescue
+      rescue StandardError
        reflect(:execution_failed, item)
-
+        # Re-acquire the "until" lock to prevent duplicates while job is in retry
+        # Use non-blocking lock attempt to avoid hanging on shutdown
+        locksmith.lock(wait: 0)

        raise
      end
@@ -56,9 +58,10 @@ module SidekiqUniqueJobs

      def ensure_relocked
        yield
-      rescue
+      rescue StandardError
        reflect(:execution_failed, item)
-
+        # Re-acquire the "until" lock to prevent duplicates while job is in retry
+        locksmith.lock(wait: 0)

        raise
      end
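The practical effect for `until_and_while_executing`: a job that raises keeps its client-side ("until") lock while it sits in the retry set, so duplicates cannot be enqueued in the meantime. A sketch of a worker relying on this — the worker class and its body are made up:

```ruby
class SyncAccountWorker
  include Sidekiq::Job
  sidekiq_options lock: :until_and_while_executing

  def perform(account_id)
    # If this raises, 8.0.12 re-acquires the "until" lock with wait: 0
    # before re-raising, so no duplicate can be pushed while we retry.
    Account.find(account_id).sync! # Account is a hypothetical model
  end
end
```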
data/lib/sidekiq_unique_jobs/lock.rb:

@@ -6,7 +6,7 @@ module SidekiqUniqueJobs
  #
  # @author Mikael Henriksson <mikael@mhenrixon.com>
  #
-  class Lock
+  class Lock
    # includes "SidekiqUniqueJobs::Connection"
    # @!parse include SidekiqUniqueJobs::Connection
    include SidekiqUniqueJobs::Connection
data/lib/sidekiq_unique_jobs/lock_args.rb:

@@ -91,9 +91,9 @@ module SidekiqUniqueJobs
      job_class.send(lock_args_method, args)
    rescue ArgumentError
      raise SidekiqUniqueJobs::InvalidUniqueArguments,
-
-
-
+            given: args,
+            job_class: job_class,
+            lock_args_method: lock_args_method
    end

    # The method to use for filtering unique arguments
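`InvalidUniqueArguments` is raised when the configured filter method cannot be called with the job's arguments, and it now carries the offending pieces as keyword arguments. For reference, a custom filter is wired up via `lock_args_method` (documented gem API; the worker itself is illustrative):

```ruby
class SearchWorker
  include Sidekiq::Job
  sidekiq_options lock: :until_executed, lock_args_method: :lock_args

  # Only the first positional argument participates in the digest.
  # Declaring the wrong arity here (e.g. a zero-argument lock_args)
  # is the kind of mistake that raises InvalidUniqueArguments above.
  def self.lock_args(args)
    [args.first]
  end
end
```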
data/lib/sidekiq_unique_jobs/lock_digest.rb:

@@ -51,7 +51,12 @@ module SidekiqUniqueJobs
    # Creates a namespaced unique digest based on the {#digestable_hash} and the {#lock_prefix}
    # @return [String] a unique digest
    def create_digest
-      digest =
+      digest = if SidekiqUniqueJobs.config.digest_algorithm == :legacy
+        OpenSSL::Digest::MD5.hexdigest(dump_json(digestable_hash.sort))
+      else
+        OpenSSL::Digest.new("SHA3-256", dump_json(digestable_hash.sort)).hexdigest
+      end
+
      "#{lock_prefix}:#{digest}"
    end

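This makes the digest algorithm configurable: `:legacy` keeps the old MD5 digests, while the other branch uses SHA3-256. Upgraders who need pre-existing locks to keep resolving to the same Redis keys can presumably pin the legacy algorithm — a sketch assuming `digest_algorithm` is a plain config accessor (the key name comes from the diff; the rest of config.rb is not shown here):

```ruby
SidekiqUniqueJobs.configure do |config|
  # Keep MD5-compatible digests so locks created before the upgrade
  # still map to the same keys as after it.
  config.digest_algorithm = :legacy
end
```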
data/lib/sidekiq_unique_jobs/lock_ttl.rb:

@@ -40,13 +40,15 @@ module SidekiqUniqueJobs
    #
    # Calculates the time until the job is scheduled starting from now
    #
+    # @note Ensures result is never negative to prevent TTL calculation issues
    #
-    # @return [Integer] the number of seconds until job is scheduled
+    # @return [Integer] the number of seconds until job is scheduled (>= 0)
    #
    def time_until_scheduled
      return 0 unless scheduled_at

-
+      # Clamp to 0 to prevent negative values if job is already overdue
+      [0, scheduled_at.to_i - Time.now.utc.to_i].max
    end

    # The time a job is scheduled
@@ -66,12 +68,36 @@ module SidekiqUniqueJobs
    # @return [Integer] the number of seconds to live
    #
    def calculate
-      ttl =
-
-
-
-
-
+      ttl = fetch_ttl
+      return unless ttl
+
+      timing = calculate_timing(ttl)
+      return unless timing
+
+      timing.to_i + time_until_scheduled
+    end
+
+    private
+
+    def fetch_ttl
+      item[LOCK_TTL] ||
+        job_options[LOCK_TTL] ||
+        item[LOCK_EXPIRATION] || # TODO: Deprecate at some point
+        job_options[LOCK_EXPIRATION] || # TODO: Deprecate at some point
+        SidekiqUniqueJobs.config.lock_ttl
+    end
+
+    def calculate_timing(ttl)
+      case ttl
+      when String, Numeric
+        ttl
+      when Proc
+        ttl.call(item[ARGS])
+      when Symbol
+        job_class.send(ttl, item[ARGS])
+      else
+        raise ArgumentError, "#{ttl.class} is not supported for lock_ttl"
+      end
    end
  end
end
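The refactor splits TTL resolution (`fetch_ttl`) from interpretation (`calculate_timing`), and the case statement shows the accepted forms: a String/Numeric value, a Proc called with the job's arguments, or a Symbol sent to the job class. A sketch of the Proc and Symbol forms — worker names and values are illustrative; note these must live in the class-level `sidekiq_options` (`job_options`), since a Proc cannot survive the JSON round-trip into the job payload:

```ruby
class ImportWorker
  include Sidekiq::Job
  # Proc form: receives the job's args array (ttl.call(item[ARGS]) above)
  sidekiq_options lock: :until_executed,
                  lock_ttl: ->(args) { args.first == "full" ? 3_600 : 300 }
end

class ExportWorker
  include Sidekiq::Job
  # Symbol form: resolved via job_class.send(ttl, item[ARGS]) above
  sidekiq_options lock: :until_executed, lock_ttl: :ttl_for

  def self.ttl_for(args)
    args.last.to_i
  end
end
```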
data/lib/sidekiq_unique_jobs/locksmith.rb:

@@ -4,7 +4,7 @@ module SidekiqUniqueJobs
  # Lock manager class that handles all the various locks
  #
  # @author Mikael Henriksson <mikael@mhenrixon.com>
-  class Locksmith
+  class Locksmith
    # includes "SidekiqUniqueJobs::Connection"
    # @!parse include SidekiqUniqueJobs::Connection
    include SidekiqUniqueJobs::Connection
@@ -34,6 +34,11 @@ module SidekiqUniqueJobs
    CLOCK_DRIFT_FACTOR = 0.01
    NETWORK_FACTOR = 0.04

+    #
+    # @return [Integer] Maximum wait time for blocking Redis operations (in seconds)
+    #   Prevents blocking web requests indefinitely when used in client middleware
+    MAX_BLOCKING_WAIT = 5
+
    #
    # @!attribute [r] key
    #   @return [Key] the key used for locking
@@ -81,9 +86,11 @@ module SidekiqUniqueJobs
    #
    # Deletes the lock regardless of if it has a pttl set
    #
+    # rubocop:disable Naming/PredicateMethod
    def delete!
      call_script(:delete, key.to_a, argv).to_i.positive?
    end
+    # rubocop:enable Naming/PredicateMethod

    #
    # Create a lock for the Sidekiq job
@@ -243,7 +250,7 @@ module SidekiqUniqueJobs
    # @return [nil] when lock was not possible
    # @return [Object] whatever the block returns when lock was acquired
    #
-    def primed_async(conn, wait = nil, &block)
+    def primed_async(conn, wait = nil, &block)
      timeout = (wait || config.timeout).to_i
      timeout = 1 if timeout.zero?

@@ -251,16 +258,17 @@ module SidekiqUniqueJobs
      concurrent_timeout = add_drift(timeout)

      reflect(:debug, :timeouts, item,
-
-
-
-
+              timeouts: {
+                brpoplpush_timeout: brpoplpush_timeout,
+                concurrent_timeout: concurrent_timeout,
+              })

      # NOTE: When debugging, change .value to .value!
      primed_jid = Concurrent::Promises
                   .future(conn) { |red_con| pop_queued(red_con, timeout) }
-                  .value
+                  .value(concurrent_timeout) # Timeout to prevent indefinite blocking

+      # If promise times out, primed_jid will be nil
      handle_primed(primed_jid, &block)
    end

@@ -307,6 +315,16 @@ module SidekiqUniqueJobs
    def brpoplpush(conn, wait)
      # passing timeout 0 to brpoplpush causes it to block indefinitely
      raise InvalidArgument, "wait must be an integer" unless wait.is_a?(Integer)
+      raise InvalidArgument, "wait must be positive" if wait.negative?
+
+      # Cap the wait time to prevent blocking requests too long
+      # This is especially important when called from client middleware
+      if wait > MAX_BLOCKING_WAIT
+        log_debug(
+          "Capping blocking wait from #{wait}s to #{MAX_BLOCKING_WAIT}s to prevent long request blocks",
+        )
+        wait = MAX_BLOCKING_WAIT
+      end

      conn.blmove(key.queued, key.primed, "RIGHT", "LEFT", wait)
    end
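Two safety nets are added around the blocking `BLMOVE`: the promise now times out via `.value(concurrent_timeout)`, and any requested wait above `MAX_BLOCKING_WAIT` is capped at 5 seconds. So a worker configured like the sketch below would presumably block an enqueueing client for at most about 5 seconds before its conflict strategy takes over (worker and values illustrative; `lock_timeout` and `on_conflict` are documented gem options):

```ruby
class CheckoutWorker
  include Sidekiq::Job
  # lock_timeout is how long lock acquisition may block. As of 8.0.12,
  # anything above 5 is capped inside Locksmith#brpoplpush, so this
  # should behave like lock_timeout: 5.
  sidekiq_options lock: :while_executing,
                  lock_timeout: 30,
                  on_conflict: :reschedule
end
```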
data/lib/sidekiq_unique_jobs/logging.rb:

@@ -223,8 +223,8 @@ module SidekiqUniqueJobs
    end

    def fake_logger_context(_context)
-      logger.warn "Don't know how to setup the logging context. Please open a feature request:" \
-                  "
+      logger.warn "Don't know how to setup the logging context. Please open a feature request: " \
+                  "https://github.com/mhenrixon/sidekiq-unique-jobs/issues/new?template=feature_request.md"

      yield
    end
data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_process_set.lua:

@@ -1,9 +1,11 @@
 local function find_digest_in_process_set(digest, threshold)
   local process_cursor = 0
   local job_cursor = 0
-  local pattern = "*" .. digest .. "*"
   local found = false

+  -- Cache digest transformation outside the loop - major performance win!
+  local digest_without_run = string.gsub(digest, ':RUN', '')
+
   log_debug("Searching in process list",
             "for digest:", digest,
             "cursor:", process_cursor)
@@ -26,8 +28,11 @@ local function find_digest_in_process_set(digest, threshold)
       log_debug("No entries in:", workers_key)
     else
       for i = 1, #jobs, 2 do
-        local jobstr = jobs[i +1]
-
+        local jobstr = jobs[i + 1]
+        -- Use cached digest transformation - avoid repeated string.gsub on digest
+        local jobstr_without_run = string.gsub(jobstr, ':RUN', '')
+
+        if string.find(jobstr_without_run, digest_without_run) then
          log_debug("Found digest", digest, "in:", workers_key)
          found = true
          break
data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_queues.lua:

@@ -32,8 +32,19 @@ local function find_digest_in_queues(digest)
        break
      end
    end
+
+    -- Short-circuit: Stop scanning this queue's batches after finding match
+    if found then
+      break
+    end
+
    index = index + per
  end
+
+  -- Short-circuit: Stop scanning remaining queues after finding match
+  if found then
+    break
+  end
 end

 cursor = next_cursor
data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_sorted_set.lua:

@@ -1,12 +1,16 @@
 local function find_digest_in_sorted_set(name, digest)
   local cursor = 0
-  local count =
+  local count = 50
   local pattern = "*" .. digest .. "*"
   local found = false

   log_debug("searching in:", name,
             "for digest:", digest,
             "cursor:", cursor)
+
+  -- Note: We must use pattern matching because sorted sets contain job JSON strings,
+  -- not just digests. The digest is embedded in the JSON as the "lock_digest" field.
+  -- ZSCORE won't work here as we need to search within the member content.
   repeat
     local pagination = redis.call("ZSCAN", name, cursor, "MATCH", pattern, "COUNT", count)
     local next_cursor = pagination[1]
data/lib/sidekiq_unique_jobs/lua/unlock.lua:

@@ -42,12 +42,25 @@ local locked_count = redis.call("HLEN", locked)
 --------- Begin unlock.lua ---------
 log_debug("BEGIN unlock digest:", digest, "(job_id: " .. job_id ..")")

-
-if
-
+-- Always clean up this job's queued/primed entries first
+-- This prevents orphaned entries even if job doesn't hold the lock
+log_debug("LREM", queued, -1, job_id)
+redis.call("LREM", queued, -1, job_id)
+
+log_debug("LREM", primed, -1, job_id)
+redis.call("LREM", primed, -1, job_id)
+
+-- Check if this job actually holds the lock
+local holds_lock = redis.call("HEXISTS", locked, job_id) == 1
+log_debug("HEXISTS", locked, job_id, "=>", holds_lock)
+
+if not holds_lock then
+  -- Job doesn't hold the lock - check if this is an orphaned lock scenario
   if queued_count == 0 and primed_count == 0 and locked_count == 0 then
-    log_debug("Orphaned lock")
+    log_debug("Orphaned lock - cleaning up")
+    -- Continue with cleanup below
   else
+    -- Other jobs still hold locks for this digest
     local result = ""
     for i,v in ipairs(redis.call("HKEYS", locked)) do
       result = result .. v .. ","
@@ -55,17 +68,12 @@ if redis.call("HEXISTS", locked, job_id) == 0 then
     result = locked .. " (" .. result .. ")"
     log("Yielding to: " .. result)
     log_debug("Yielding to", result, locked, "by job", job_id)
-    return
+    -- Still return job_id to indicate cleanup completed
+    -- Caller already removed from queued/primed
+    return job_id
   end
 end

--- Just in case something went wrong
-log_debug("LREM", queued, -1, job_id)
-redis.call("LREM", queued, -1, job_id)
-
-log_debug("LREM", primed, -1, job_id)
-redis.call("LREM", primed, -1, job_id)
-
 local redis_version = toversion(redisversion)

 if lock_type ~= "until_expired" then
data/lib/sidekiq_unique_jobs/on_conflict/reject.rb:

@@ -28,7 +28,16 @@ module SidekiqUniqueJobs
    # @return [false] when Sidekiq::Deadset#kill does not take multiple arguments
    #
    def kill_with_options?
-      Sidekiq::DeadSet.instance_method(:kill).arity
+      kill_arity = Sidekiq::DeadSet.instance_method(:kill).arity
+      # Method#arity returns:
+      # 1. a nonnegative number for methods that take a fixed number of arguments.
+      # 2. A negative number if it takes a variable number of arguments.
+      # Keyword arguments are considered a single argument, and are considered optional unless one of the kwargs is
+      # required.
+      # Therefore, to determine if `Sidekiq::DeadSet#kill` accepts options beyond the single positional payload
+      # argument, we need to check whether the absolute value of the arity is greater than 1.
+      # See: https://apidock.com/ruby/Method/arity
+      kill_arity > 1 || kill_arity < -1
    end

    #
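The arity check can be sanity-tested in plain Ruby. These stand-in methods (not Sidekiq's) demonstrate the values the comment describes:

```ruby
def kill_old(message); end                       # arity ==  1
def kill_new(message, opts = {}); end            # arity == -2
def kill_kw(message, notify_failure: true); end  # arity ==  1 (optional kwargs don't count)

kill_with_options = ->(arity) { arity > 1 || arity < -1 }

kill_with_options.call(method(:kill_old).arity) # => false
kill_with_options.call(method(:kill_new).arity) # => true
kill_with_options.call(method(:kill_kw).arity)  # => false
```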
data/lib/sidekiq_unique_jobs/on_conflict/replace.rb:

@@ -21,7 +21,7 @@ module SidekiqUniqueJobs
    # @param [Hash] item sidekiq job hash
    #
    def initialize(item, redis_pool = nil)
-      super
+      super
      @queue = item[QUEUE]
      @lock_digest = item[LOCK_DIGEST]
    end
@@ -54,8 +54,8 @@ module SidekiqUniqueJobs
    #
    def delete_job_by_digest
      call_script(:delete_job_by_digest,
-
-
+                  keys: ["#{QUEUE}:#{queue}", SCHEDULE, RETRY],
+                  argv: [lock_digest])
    end

    #
data/lib/sidekiq_unique_jobs/on_conflict.rb:

@@ -33,8 +33,8 @@ module SidekiqUniqueJobs

    strategies.fetch(strategy.to_sym) do
      SidekiqUniqueJobs.logger.warn(
-        "No matching implementation for strategy: #{strategy}, returning OnConflict::NullStrategy." \
-        "
+        "No matching implementation for strategy: #{strategy}, returning OnConflict::NullStrategy. " \
+        "Available strategies are (#{strategies.inspect})",
      )

      OnConflict::NullStrategy
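The warning fires when `on_conflict` names an unknown strategy, which then falls back to `OnConflict::NullStrategy`. A valid configuration uses one of the built-in names (documented gem API; the worker is illustrative):

```ruby
class PaymentWorker
  include Sidekiq::Job
  # A typo such as on_conflict: :rejekt would log the warning above
  # and silently fall back to OnConflict::NullStrategy.
  sidekiq_options lock: :until_executed, on_conflict: :reject
end
```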
data/lib/sidekiq_unique_jobs/orphans/manager.rb:

@@ -32,7 +32,7 @@ module SidekiqUniqueJobs
    #
    # @return [SidekiqUniqueJobs::TimerTask] the task that was started
    #
-    def start(test_task = nil)
+    def start(test_task = nil)
      return if disabled?
      return if registered?

@@ -73,7 +73,7 @@ module SidekiqUniqueJobs
    # @return [<type>] <description>
    #
    def task
-      @task ||= default_task # rubocop:disable ThreadSafety/
+      @task ||= default_task # rubocop:disable ThreadSafety/ClassInstanceVariable
    end

    #
@@ -101,7 +101,7 @@ module SidekiqUniqueJobs
    # @return [void]
    #
    def task=(task)
-      @task = task # rubocop:disable ThreadSafety/
+      @task = task # rubocop:disable ThreadSafety/ClassInstanceVariable
    end

    #
data/lib/sidekiq_unique_jobs/orphans/ruby_reaper.rb:

@@ -9,7 +9,6 @@ module SidekiqUniqueJobs
    #
    # @author Mikael Henriksson <mikael@mhenrixon.com>
    #
-  # rubocop:disable Metrics/ClassLength
   class RubyReaper < Reaper
     include SidekiqUniqueJobs::Timing

@@ -57,7 +56,7 @@ module SidekiqUniqueJobs
    # @param [Redis] conn a connection to redis
    #
    def initialize(conn)
-      super
+      super
      @digests = SidekiqUniqueJobs::Digests.new
      @scheduled = Redis::SortedSet.new(SCHEDULE)
      @retried = Redis::SortedSet.new(RETRY)
@@ -107,7 +106,7 @@ module SidekiqUniqueJobs
    #
    # @return [Array<String>] an array of orphaned digests
    #
-    def orphans
+    def orphans
      orphans = []
      page = 0
      per = reaper_count * 2
@@ -145,7 +144,10 @@ module SidekiqUniqueJobs
    # 1. It checks the scheduled set
    # 2. It checks the retry set
    # 3. It goes through all queues
+    # 4. It checks active processes
    #
+    # Note: Uses early returns for short-circuit evaluation.
+    # We can't pipeline ZSCAN operations as they're iterative.
    #
    # @param [String] digest the digest to search for
    #
@@ -153,7 +155,17 @@ module SidekiqUniqueJobs
    # @return [false] when no job was found for this digest
    #
    def belongs_to_job?(digest)
-
+      # Short-circuit: Return immediately if found in scheduled set
+      return true if scheduled?(digest)
+
+      # Short-circuit: Return immediately if found in retry set
+      return true if retried?(digest)
+
+      # Short-circuit: Return immediately if found in any queue
+      return true if enqueued?(digest)
+
+      # Last check: active processes
+      active?(digest)
    end

    #
@@ -197,7 +209,7 @@ module SidekiqUniqueJobs
      end
    end

-    def active?(digest)
+    def active?(digest)
      Sidekiq.redis do |conn|
        procs = conn.sscan("processes").to_a
        return false if procs.empty?
@@ -219,10 +231,12 @@ module SidekiqUniqueJobs
        workers.each_pair do |_tid, job|
          next unless (item = safe_load_json(job))

-
+          next unless (raw_payload = item[PAYLOAD])
+
+          payload = safe_load_json(raw_payload)

          return true if match?(digest, payload[LOCK_DIGEST])
-          return true if considered_active?(payload[CREATED_AT])
+          return true if considered_active?(time_from_payload_timestamp(payload[CREATED_AT]).to_f)
        end
      end

@@ -240,6 +254,15 @@ module SidekiqUniqueJobs
      max_score < time_f
    end

+    def time_from_payload_timestamp(timestamp)
+      if timestamp.is_a?(Float)
+        # < Sidekiq 8, timestamps were stored as fractional seconds since the epoch
+        Time.at(timestamp).utc
+      else
+        Time.at(timestamp / 1000, timestamp % 1000, :millisecond)
+      end
+    end
+
    #
    # Loops through all the redis queues and yields them one by one
    #
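The new helper bridges a Sidekiq 8 format change: older payloads store `created_at` as fractional epoch seconds (a Float), newer ones as integer epoch milliseconds. A quick check of both branches in plain Ruby:

```ruby
# Pre-Sidekiq 8 style: Float seconds since the epoch
Time.at(1_700_000_000.25).utc
# => 2023-11-14 22:13:20.25 UTC

# Sidekiq 8+ style: Integer milliseconds since the epoch
ms = 1_700_000_000_250
Time.at(ms / 1000, ms % 1000, :millisecond)
# => same instant (rendered in the local zone unless .utc is applied)
```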
@@ -253,7 +276,7 @@ module SidekiqUniqueJobs
      conn.sscan("queues").each(&block)
    end

-    def entries(conn, queue, &block)
+    def entries(conn, queue, &block)
      queue_key = "queue:#{queue}"
      initial_size = conn.llen(queue_key)
      deleted_size = 0
@@ -297,6 +320,9 @@ module SidekiqUniqueJobs
    #
    # Checks a sorted set for the existance of this digest
    #
+    # Note: Must use pattern matching because sorted sets contain job JSON strings,
+    # not just digests. The digest is embedded in the JSON as the "lock_digest" field.
+    # ZSCORE won't work here as we need to search within the member content.
    #
    # @param [String] key the key for the sorted set
    # @param [String] digest the digest to scan for
@@ -305,7 +331,8 @@ module SidekiqUniqueJobs
    # @return [false] when missing
    #
    def in_sorted_set?(key, digest)
-
+      # Increased count from 1 to 50 for better throughput
+      conn.zscan(key, match: "*#{digest}*", count: 50).to_a.any?
    end
  end
  # rubocop:enable Metrics/ClassLength
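Because the schedule and retry sets store the full job JSON as the sorted-set member, the digest has to be substring-matched inside the member rather than looked up directly. A sketch of what the reaper's scan sees, mirroring the call in the hunk above (digest value and payload abbreviated for illustration):

```ruby
# A member of the "schedule" sorted set is the whole job payload, e.g.:
#   {"class":"OrderWorker","args":[1],...,"lock_digest":"uniquejobs:abc123"}
# so the digest is found by pattern match, with a COUNT hint of 50 per page:
Sidekiq.redis do |conn|
  conn.zscan("schedule", match: "*uniquejobs:abc123*", count: 50).to_a.any?
end
```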
data/lib/sidekiq_unique_jobs/reflections.rb:

@@ -50,15 +50,15 @@ module SidekiqUniqueJobs
    #
    # @return [void] <description>
    #
-    def dispatch(reflection, *args)
+    def dispatch(reflection, *args)
      if (block = @reflections[reflection])
        block.call(*args)

        if DEPRECATIONS.key?(reflection)
          replacement, removal_version = DEPRECATIONS[reflection]
          SidekiqUniqueJobs::Deprecation.warn(
-            "#{reflection} is deprecated and will be removed in version #{removal_version}." \
-            "
+            "#{reflection} is deprecated and will be removed in version #{removal_version}. " \
+            "Use #{replacement} instead.",
          )
        end
      elsif misconfigured?(reflection)
data/lib/sidekiq_unique_jobs/rspec/matchers/have_valid_sidekiq_options.rb:

@@ -43,9 +43,11 @@ module SidekiqUniqueJobs
    #
    # @return [HaveValidSidekiqOptions] an RSpec matcher
    #
-
+    # rubocop:disable Naming/PredicatePrefix
+    def have_valid_sidekiq_options(*args)
      HaveValidSidekiqOptions.new(*args)
    end
+    # rubocop:enable Naming/PredicatePrefix
  end
end