sidekiq-unique-jobs 7.1.6 → 7.1.30
This version of sidekiq-unique-jobs has been flagged as a potentially problematic release.
- checksums.yaml +4 -4
- data/CHANGELOG.md +312 -3
- data/README.md +19 -15
- data/lib/sidekiq_unique_jobs/batch_delete.rb +8 -7
- data/lib/sidekiq_unique_jobs/changelog.rb +3 -3
- data/lib/sidekiq_unique_jobs/cli.rb +33 -8
- data/lib/sidekiq_unique_jobs/config.rb +48 -4
- data/lib/sidekiq_unique_jobs/constants.rb +45 -44
- data/lib/sidekiq_unique_jobs/core_ext.rb +1 -1
- data/lib/sidekiq_unique_jobs/deprecation.rb +30 -0
- data/lib/sidekiq_unique_jobs/digests.rb +8 -11
- data/lib/sidekiq_unique_jobs/exceptions.rb +4 -3
- data/lib/sidekiq_unique_jobs/expiring_digests.rb +14 -0
- data/lib/sidekiq_unique_jobs/job.rb +5 -0
- data/lib/sidekiq_unique_jobs/json.rb +7 -0
- data/lib/sidekiq_unique_jobs/key.rb +13 -8
- data/lib/sidekiq_unique_jobs/lock/base_lock.rb +30 -23
- data/lib/sidekiq_unique_jobs/lock/until_and_while_executing.rb +9 -3
- data/lib/sidekiq_unique_jobs/lock/until_executed.rb +15 -4
- data/lib/sidekiq_unique_jobs/lock/until_executing.rb +14 -4
- data/lib/sidekiq_unique_jobs/lock/until_expired.rb +13 -5
- data/lib/sidekiq_unique_jobs/lock/while_executing.rb +10 -6
- data/lib/sidekiq_unique_jobs/lock.rb +27 -10
- data/lib/sidekiq_unique_jobs/lock_args.rb +19 -15
- data/lib/sidekiq_unique_jobs/lock_config.rb +6 -6
- data/lib/sidekiq_unique_jobs/lock_digest.rb +7 -7
- data/lib/sidekiq_unique_jobs/lock_info.rb +2 -2
- data/lib/sidekiq_unique_jobs/lock_timeout.rb +4 -4
- data/lib/sidekiq_unique_jobs/lock_ttl.rb +5 -5
- data/lib/sidekiq_unique_jobs/lock_type.rb +37 -0
- data/lib/sidekiq_unique_jobs/locksmith.rb +41 -10
- data/lib/sidekiq_unique_jobs/logging.rb +23 -0
- data/lib/sidekiq_unique_jobs/lua/lock.lua +18 -12
- data/lib/sidekiq_unique_jobs/lua/lock_until_expired.lua +92 -0
- data/lib/sidekiq_unique_jobs/lua/reap_orphans.lua +31 -3
- data/lib/sidekiq_unique_jobs/lua/shared/_common.lua +1 -1
- data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_process_set.lua +1 -1
- data/lib/sidekiq_unique_jobs/lua/unlock.lua +17 -5
- data/lib/sidekiq_unique_jobs/middleware/client.rb +3 -1
- data/lib/sidekiq_unique_jobs/middleware/server.rb +2 -0
- data/lib/sidekiq_unique_jobs/middleware.rb +4 -4
- data/lib/sidekiq_unique_jobs/on_conflict/reject.rb +4 -4
- data/lib/sidekiq_unique_jobs/on_conflict/reschedule.rb +3 -3
- data/lib/sidekiq_unique_jobs/options_with_fallback.rb +6 -8
- data/lib/sidekiq_unique_jobs/orphans/lua_reaper.rb +1 -1
- data/lib/sidekiq_unique_jobs/orphans/manager.rb +40 -12
- data/lib/sidekiq_unique_jobs/orphans/ruby_reaper.rb +99 -14
- data/lib/sidekiq_unique_jobs/redis/string.rb +3 -1
- data/lib/sidekiq_unique_jobs/reflectable.rb +11 -2
- data/lib/sidekiq_unique_jobs/reflections.rb +12 -1
- data/lib/sidekiq_unique_jobs/script/caller.rb +7 -7
- data/lib/sidekiq_unique_jobs/server.rb +13 -1
- data/lib/sidekiq_unique_jobs/sidekiq_unique_ext.rb +1 -1
- data/lib/sidekiq_unique_jobs/sidekiq_unique_jobs.rb +13 -3
- data/lib/sidekiq_unique_jobs/sidekiq_worker_methods.rb +40 -21
- data/lib/sidekiq_unique_jobs/testing.rb +53 -21
- data/lib/sidekiq_unique_jobs/timer_task.rb +266 -45
- data/lib/sidekiq_unique_jobs/timing.rb +1 -1
- data/lib/sidekiq_unique_jobs/upgrade_locks.rb +6 -6
- data/lib/sidekiq_unique_jobs/version.rb +1 -1
- data/lib/sidekiq_unique_jobs/web/helpers.rb +11 -1
- data/lib/sidekiq_unique_jobs/web/views/lock.erb +5 -3
- data/lib/sidekiq_unique_jobs/web.rb +22 -3
- data/lib/sidekiq_unique_jobs.rb +2 -0
- data/lib/tasks/changelog.rake +16 -16
- metadata +26 -13
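
To pick up this release in an application, bump the dependency in the Gemfile; a minimal sketch (the version constraint is illustrative, any constraint covering 7.1.30 works):

    # Gemfile: stay on the 7.1 series but require at least this release (illustrative)
    gem "sidekiq-unique-jobs", "~> 7.1", ">= 7.1.30"
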
data/lib/sidekiq_unique_jobs/lua/reap_orphans.lua

@@ -1,9 +1,10 @@
 redis.replicate_commands()
 
 -------- BEGIN keys ---------
-local digests_set
-local
-local
+local digests_set = KEYS[1]
+local expiring_digests_set = KEYS[2]
+local schedule_set = KEYS[3]
+local retry_set = KEYS[4]
 -------- END keys ---------
 
 -------- BEGIN argv ---------

@@ -90,5 +91,32 @@ repeat
   index = index + per
 until index >= total or del_count >= reaper_count
 
+if del_count < reaper_count then
+  index = 0
+  total = redis.call("ZCOUNT", expiring_digests_set, 0, current_time)
+  repeat
+    local digests = redis.call("ZRANGEBYSCORE", expiring_digests_set, 0, current_time, "LIMIT", index, index + per -1)
+
+    for _, digest in pairs(digests) do
+      local queued     = digest .. ":QUEUED"
+      local primed     = digest .. ":PRIMED"
+      local locked     = digest .. ":LOCKED"
+      local info       = digest .. ":INFO"
+      local run_digest = digest .. ":RUN"
+      local run_queued = digest .. ":RUN:QUEUED"
+      local run_primed = digest .. ":RUN:PRIMED"
+      local run_locked = digest .. ":RUN:LOCKED"
+      local run_info   = digest .. ":RUN:INFO"
+
+      redis.call(del_cmd, digest, queued, primed, locked, info, run_digest, run_queued, run_primed, run_locked, run_info)
+
+      redis.call("ZREM", expiring_digests_set, digest)
+      del_count = del_count + 1
+    end
+
+    index = index + per
+  until index >= total or del_count >= reaper_count
+end
+
 log_debug("END")
 return del_count

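The two hunks above extend reap_orphans.lua: the keys now include the new expiring digests sorted set, and when a run deletes fewer than reaper_count orphans it also removes digests whose expiration score is older than current_time, together with their :QUEUED, :PRIMED, :LOCKED, :INFO and :RUN companions. How the reaper runs is controlled from Ruby; a minimal sketch using the gem's documented configuration keys (the values shown are examples, not recommendations):

    # config/initializers/sidekiq_unique_jobs.rb (sketch)
    SidekiqUniqueJobs.configure do |config|
      config.reaper          = :ruby   # or :lua to run the script above, nil/false to disable
      config.reaper_count    = 1_000   # upper bound on digests deleted per run
      config.reaper_interval = 600     # seconds between reaper runs
      config.reaper_timeout  = 10      # seconds a ruby reaper run may spend
    end
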
data/lib/sidekiq_unique_jobs/lua/shared/_common.lua

@@ -31,7 +31,7 @@ local function log(message, prev_jid)
   log_debug("ZADD", changelog, current_time, entry);
   redis.call("ZADD", changelog, current_time, entry);
   local total_entries = redis.call("ZCARD", changelog)
-  local removed_entries = redis.call("ZREMRANGEBYRANK", changelog,
+  local removed_entries = redis.call("ZREMRANGEBYRANK", changelog, 0, -1 * max_history)
   if removed_entries > 0 then
     log_debug("Removing", removed_entries , "entries from changelog (total entries", total_entries, "exceeds max_history:", max_history ..")");
   end

data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_process_set.lua

@@ -15,7 +15,7 @@ local function find_digest_in_process_set(digest, threshold)
   log_debug("Found number of processes:", #processes, "next cursor:", next_process_cursor)
 
   for _, process in ipairs(processes) do
-    local workers_key = process .. ":
+    local workers_key = process .. ":work"
     log_debug("searching in process set:", process,
               "for digest:", digest,
               "cursor:", process_cursor)

data/lib/sidekiq_unique_jobs/lua/unlock.lua

@@ -65,11 +65,6 @@ redis.call("LREM", queued, -1, job_id)
 log_debug("LREM", primed, -1, job_id)
 redis.call("LREM", primed, -1, job_id)
 
-if limit and limit <= 1 and locked_count and locked_count <= 1 then
-  log_debug("ZREM", digests, digest)
-  redis.call("ZREM", digests, digest)
-end
-
 local redis_version = toversion(redisversion)
 local del_cmd = "DEL"
 

@@ -83,6 +78,23 @@ if lock_type ~= "until_expired" then
   redis.call("HDEL", locked, job_id)
 end
 
+local locked_count = redis.call("HLEN", locked)
+
+if locked_count and locked_count < 1 then
+  log_debug(del_cmd, locked)
+  redis.call(del_cmd, locked)
+end
+
+if redis.call("LLEN", primed) == 0 then
+  log_debug(del_cmd, primed)
+  redis.call(del_cmd, primed)
+end
+
+if limit and limit <= 1 and locked_count and locked_count <= 1 then
+  log_debug("ZREM", digests, digest)
+  redis.call("ZREM", digests, digest)
+end
+
 log_debug("LPUSH", queued, "1")
 redis.call("LPUSH", queued, "1")
 

data/lib/sidekiq_unique_jobs/middleware/client.rb

@@ -6,6 +6,8 @@ module SidekiqUniqueJobs
     #
     # @author Mikael Henriksson <mikael@mhenrixon.com>
     class Client
+      include Sidekiq::ClientMiddleware if defined?(Sidekiq::ClientMiddleware)
+
       # prepend "SidekiqUniqueJobs::Middleware"
       # @!parse prepends SidekiqUniqueJobs::Middleware
       prepend SidekiqUniqueJobs::Middleware

@@ -30,7 +32,7 @@ module SidekiqUniqueJobs
       private
 
       def lock
-        lock_instance.lock do
+        lock_instance.lock do
           reflect(:locked, item)
           return yield
         end

data/lib/sidekiq_unique_jobs/middleware/server.rb

@@ -6,6 +6,8 @@ module SidekiqUniqueJobs
     #
     # @author Mikael Henriksson <mikael@mhenrixon.com>
     class Server
+      include Sidekiq::ServerMiddleware if defined?(Sidekiq::ServerMiddleware)
+
       # prepend "SidekiqUniqueJobs::Middleware"
       # @!parse prepends SidekiqUniqueJobs::Middleware
       prepend SidekiqUniqueJobs::Middleware

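Both middleware classes now include Sidekiq's own ClientMiddleware/ServerMiddleware helper modules when they exist (newer Sidekiq versions define them); the `if defined?` guard keeps older Sidekiq releases working. For reference, a sketch of the registration documented in the gem's README:

    Sidekiq.configure_server do |config|
      config.client_middleware do |chain|
        chain.add SidekiqUniqueJobs::Middleware::Client
      end

      config.server_middleware do |chain|
        chain.add SidekiqUniqueJobs::Middleware::Server
      end

      SidekiqUniqueJobs::Server.configure(config)
    end

    Sidekiq.configure_client do |config|
      config.client_middleware do |chain|
        chain.add SidekiqUniqueJobs::Middleware::Client
      end
    end
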
data/lib/sidekiq_unique_jobs/middleware.rb

@@ -29,10 +29,10 @@ module SidekiqUniqueJobs
     # @yieldparam [<type>] if <description>
     # @yieldreturn [<type>] <describe what yield should return>
     def call(worker_class, item, queue, redis_pool = nil)
-      @
-      @
-      @
-
+      @item = item
+      @queue = queue
+      @redis_pool = redis_pool
+      self.job_class = worker_class
       return yield if unique_disabled?
 
       SidekiqUniqueJobs::Job.prepare(item) unless item[LOCK_DIGEST]

data/lib/sidekiq_unique_jobs/on_conflict/reject.rb

@@ -96,10 +96,10 @@ module SidekiqUniqueJobs
       #
      def push_to_deadset
        redis do |conn|
-          conn.multi do
-
-
-
+          conn.multi do |pipeline|
+            pipeline.zadd("dead", now_f, payload)
+            pipeline.zremrangebyscore("dead", "-inf", now_f - Sidekiq::DeadSet.timeout)
+            pipeline.zremrangebyrank("dead", 0, -Sidekiq::DeadSet.max_jobs)
          end
        end
      end

data/lib/sidekiq_unique_jobs/on_conflict/reschedule.rb

@@ -14,14 +14,14 @@ module SidekiqUniqueJobs
       # @param [Hash] item sidekiq job hash
       def initialize(item, redis_pool = nil)
         super(item, redis_pool)
-
+        self.job_class = item[CLASS]
       end
 
       # Create a new job from the current one.
       # This will mess up sidekiq stats because a new job is created
       def call
-        if
-        if
+        if sidekiq_job_class?
+          if job_class.set(queue: item["queue"].to_sym).perform_in(5, *item[ARGS])
             reflect(:rescheduled, item)
           else
             reflect(:reschedule_failed, item)

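Reschedule now resolves the job class from item[CLASS] and re-enqueues the conflicting job five seconds later via perform_in. On the worker side the strategy is picked with sidekiq_options; a minimal sketch (the worker name and lock choice are only examples):

    class ProcessOrderJob
      include Sidekiq::Worker

      # If another job already holds the lock, push a copy of this job
      # to run again in five seconds instead of dropping it.
      sidekiq_options lock: :until_executed, on_conflict: :reschedule

      def perform(order_id); end
    end
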
data/lib/sidekiq_unique_jobs/options_with_fallback.rb

@@ -6,7 +6,7 @@ module SidekiqUniqueJobs
   # Requires the following methods to be defined in the including class
   #   1. item (required)
   #   2. options (can be nil)
-  #   3.
+  #   3. job_class (required, can be anything)
   # @author Mikael Henriksson <mikael@mhenrixon.com>
   module OptionsWithFallback
     def self.included(base)

@@ -46,10 +46,8 @@ module SidekiqUniqueJobs
     # @return [Class]
     #
     def lock_class
-      @lock_class ||=
-
-        raise UnknownLock, "No implementation for `lock: :#{lock_type}`"
-      end
+      @lock_class ||= locks.fetch(lock_type.to_sym) do
+        raise UnknownLock, "No implementation for `lock: :#{lock_type}`"
       end
     end
 

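lock_class now resolves the implementation straight from the locks registry and raises UnknownLock for anything unregistered. Custom locks plug into that registry through the configuration; a sketch assuming the add_lock API described in the project README, with a hypothetical lock class:

    class MyWhileExecutingLongerLock < SidekiqUniqueJobs::Lock::BaseLock
      def execute
        # custom locking behaviour would go here
      end
    end

    SidekiqUniqueJobs.configure do |config|
      config.add_lock :my_while_executing_longer, MyWhileExecutingLongerLock
    end

    # Workers can then declare `lock: :my_while_executing_longer`;
    # unknown lock symbols raise SidekiqUniqueJobs::UnknownLock.
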
@@ -57,7 +55,7 @@ module SidekiqUniqueJobs
     # The type of lock for this worker
     #
     #
-    # @return [Symbol]
+    # @return [Symbol, NilClass]
     #
     def lock_type
       @lock_type ||= options[LOCK] || item[LOCK]

@@ -71,8 +69,8 @@ module SidekiqUniqueJobs
     #
     def options
       @options ||= begin
-        opts =
-        opts.merge!(
+        opts = default_job_options.dup
+        opts.merge!(job_options) if sidekiq_job_class?
         (opts || {}).stringify_keys
       end
     end

data/lib/sidekiq_unique_jobs/orphans/manager.rb

@@ -1,5 +1,7 @@
 # frozen_string_literal: true
 
+require "concurrent/version"
+
 module SidekiqUniqueJobs
   module Orphans
     #

@@ -10,10 +12,18 @@ module SidekiqUniqueJobs
     module Manager
       module_function
 
+      #
+      # @return [Float] the amount to add to the reaper interval
       DRIFT_FACTOR = 0.02
+      #
+      # @return [Symbol] allowed reapers (:ruby or :lua)
       REAPERS = [:ruby, :lua].freeze
 
+      # includes "SidekiqUniqueJobs::Connection"
+      # @!parse include SidekiqUniqueJobs::Connection
       include SidekiqUniqueJobs::Connection
+      # includes "SidekiqUniqueJobs::Logging"
+      # @!parse include SidekiqUniqueJobs::Logging
       include SidekiqUniqueJobs::Logging
 
       #

@@ -62,9 +72,15 @@ module SidekiqUniqueJobs
       # @return [<type>] <description>
       #
       def task
-        @task ||= default_task
+        @task ||= default_task # rubocop:disable ThreadSafety/InstanceVariableInClassMethod
       end
 
+      #
+      # A properly configured timer task
+      #
+      #
+      # @return [SidekiqUniqueJobs::TimerTask]
+      #
       def default_task
         SidekiqUniqueJobs::TimerTask.new(timer_task_options) do
           with_logging_context do

@@ -76,8 +92,15 @@ module SidekiqUniqueJobs
         end
       end
 
+      #
+      # Store a task to use for scheduled execution
+      #
+      # @param [SidekiqUniqueJobs::TimerTask] task the task to use
+      #
+      # @return [void]
+      #
       def task=(task)
-        @task = task
+        @task = task # rubocop:disable ThreadSafety/InstanceVariableInClassMethod
       end
 
       #

@@ -87,9 +110,7 @@ module SidekiqUniqueJobs
       # @return [Hash]
       #
       def timer_task_options
-        { run_now: true,
-          execution_interval: reaper_interval,
-          timeout_interval: reaper_timeout }
+        { run_now: true, execution_interval: reaper_interval }
       end
 
       #

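timeout_interval disappears from the timer task options; the run-length limit is now enforced inside the Ruby reaper itself (see the timeout_ms attribute added to RubyReaper below), so the task only needs run_now and execution_interval. A minimal sketch of a task built the same way, assuming the bundled TimerTask keeps the execute/shutdown API of concurrent-ruby's Concurrent::TimerTask (interval value is illustrative):

    task = SidekiqUniqueJobs::TimerTask.new(run_now: true, execution_interval: 600) do
      # fires immediately, then every 600 seconds
      Sidekiq.logger.info("periodic housekeeping tick")
    end

    task.execute   # start the timer loop
    # ...
    task.shutdown  # stop it, e.g. from Server.stop
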
@@ -106,13 +127,6 @@ module SidekiqUniqueJobs
         SidekiqUniqueJobs.config.reaper_interval
       end
 
-      #
-      # @see SidekiqUniqueJobs::Config#reaper_timeout
-      #
-      def reaper_timeout
-        SidekiqUniqueJobs.config.reaper_timeout
-      end
-
       #
       # A context to use for all log entries
       #

@@ -201,10 +215,24 @@ module SidekiqUniqueJobs
         redis { |conn| conn.del(UNIQUE_REAPER) }
       end
 
+      #
+      # Reaper interval with a little drift
+      # Redis isn't exact enough so to give a little bufffer,
+      # we add a tiny value to the reaper interval.
+      #
+      #
+      # @return [Integer] <description>
+      #
       def drift_reaper_interval
         reaper_interval + (reaper_interval * DRIFT_FACTOR).to_i
       end
 
+      #
+      # Current time (as integer value)
+      #
+      #
+      # @return [Integer]
+      #
       def current_timestamp
         Time.now.to_i
       end

data/lib/sidekiq_unique_jobs/orphans/ruby_reaper.rb

@@ -9,9 +9,17 @@ module SidekiqUniqueJobs
     #
     # @author Mikael Henriksson <mikael@mhenrixon.com>
     #
+    # rubocop:disable Metrics/ClassLength
     class RubyReaper < Reaper
+      include SidekiqUniqueJobs::Timing
+
+      #
+      # @return [String] the suffix for :RUN locks
       RUN_SUFFIX = ":RUN"
       #
+      # @return [Integer] the maximum combined length of sidekiq queues for running the reaper
+      MAX_QUEUE_LENGTH = 1000
+      #
       # @!attribute [r] digests
       #   @return [SidekiqUniqueJobs::Digests] digest collection
       attr_reader :digests

@@ -24,6 +32,22 @@ module SidekiqUniqueJobs
       #   @return [Redis::SortedSet] the Sidekiq RetrySet
       attr_reader :retried
 
+      #
+      # @!attribute [r] start_time
+      #   @return [Integer] The timestamp this execution started represented as Time (used for locks)
+      attr_reader :start_time
+
+      #
+      # @!attribute [r] start_time
+      #   @return [Integer] The clock stamp this execution started represented as integer
+      #     (used for redis compatibility as it is more accurate than time)
+      attr_reader :start_source
+
+      #
+      # @!attribute [r] timeout_ms
+      #   @return [Integer] The allowed ms before timeout
+      attr_reader :timeout_ms
+
       #
       # Initialize a new instance of DeleteOrphans
       #

@@ -31,9 +55,12 @@ module SidekiqUniqueJobs
       #
       def initialize(conn)
         super(conn)
-        @digests
-        @scheduled
-        @retried
+        @digests      = SidekiqUniqueJobs::Digests.new
+        @scheduled    = Redis::SortedSet.new(SCHEDULE)
+        @retried      = Redis::SortedSet.new(RETRY)
+        @start_time   = Time.now
+        @start_source = time_source.call
+        @timeout_ms   = SidekiqUniqueJobs.config.reaper_timeout * 1000
       end
 
       #

@@ -43,22 +70,59 @@ module SidekiqUniqueJobs
       # @return [Integer] the number of reaped locks
       #
       def call
+        return if queues_very_full?
+
+        BatchDelete.call(expired_digests, conn)
         BatchDelete.call(orphans, conn)
       end
 
+      def expired_digests
+        max_score = (start_time - reaper_timeout).to_f
+
+        if VersionCheck.satisfied?(redis_version, ">= 6.2.0") && VersionCheck.satisfied?(::Redis::VERSION, ">= 4.6.0")
+          conn.zrange(EXPIRING_DIGESTS, 0, max_score, byscore: true)
+        else
+          conn.zrangebyscore(EXPIRING_DIGESTS, 0, max_score)
+        end
+      end
+
       #
       # Find orphaned digests
       #
       #
       # @return [Array<String>] an array of orphaned digests
       #
-      def orphans
-
-
+      def orphans # rubocop:disable Metrics/MethodLength, Metrics/CyclomaticComplexity
+        page = 0
+        per = reaper_count * 2
+        orphans = []
+        results = conn.zrange(digests.key, page * per, (page + 1) * per)
+
+        while results.size.positive?
+          results.each do |digest|
+            break if timeout?
+            next if belongs_to_job?(digest)
+
+            orphans << digest
+            break if orphans.size >= reaper_count
+          end
+
+          break if timeout?
+          break if orphans.size >= reaper_count
 
-
-
+          page += 1
+          results = conn.zrange(digests.key, page * per, (page + 1) * per)
         end
+
+        orphans
+      end
+
+      def timeout?
+        elapsed_ms >= timeout_ms
+      end
+
+      def elapsed_ms
+        time_source.call - start_source
       end
 
       #

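expired_digests issues the same range query two ways: ZRANGE with BYSCORE needs Redis 6.2 and redis-rb 4.6 or newer, while ZRANGEBYSCORE works on older combinations; both return the members whose score falls at or below the cutoff. A standalone sketch of the two equivalent calls, assuming a plain redis-rb connection and an illustrative key name:

    require "redis"

    redis = Redis.new
    key   = "uniquejobs:expiring_digests" # illustrative key name
    max   = Time.now.to_f

    # Redis >= 6.2 with redis-rb >= 4.6
    newer = redis.zrange(key, 0, max, byscore: true)

    # Works everywhere, same members returned
    older = redis.zrangebyscore(key, 0, max)
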
@@ -124,14 +188,14 @@ module SidekiqUniqueJobs
         return false if procs.empty?
 
         procs.sort.each do |key|
-          valid, workers = conn.pipelined do
+          valid, workers = conn.pipelined do |pipeline|
             # TODO: Remove the if statement in the future
-            if
-
+            if pipeline.respond_to?(:exists?)
+              pipeline.exists?(key)
             else
-
+              pipeline.exists(key)
             end
-
+            pipeline.hgetall("#{key}:work")
           end
 
           next unless valid

@@ -182,7 +246,8 @@ module SidekiqUniqueJobs
         page_size = 50
 
         loop do
-          range_start = page * page_size - deleted_size
+          range_start = (page * page_size) - deleted_size
+
           range_end = range_start + page_size - 1
           entries = conn.lrange(queue_key, range_start, range_end)
           page += 1

@@ -192,7 +257,26 @@ module SidekiqUniqueJobs
           entries.each(&block)
 
           deleted_size = initial_size - conn.llen(queue_key)
+
+          # The queue is growing, not shrinking, just keep looping
+          deleted_size = 0 if deleted_size.negative?
+        end
+      end
+
+      # If sidekiq queues are very full, it becomes highly inefficient for the reaper
+      # because it must check every queued job to verify a digest is safe to delete
+      # The reaper checks queued jobs in batches of 50, adding 2 reads per digest
+      # With a queue length of 1,000 jobs, that's over 20 extra reads per digest.
+      def queues_very_full?
+        total_queue_size = 0
+        Sidekiq.redis do |conn|
+          queues(conn) do |queue|
+            total_queue_size += conn.llen("queue:#{queue}")
+
+            return true if total_queue_size > MAX_QUEUE_LENGTH
+          end
         end
+        false
       end
 
       #

@@ -209,5 +293,6 @@ module SidekiqUniqueJobs
         conn.zscan_each(key, match: "*#{digest}*", count: 1).to_a.any?
       end
     end
+    # rubocop:enable Metrics/ClassLength
   end
 end

data/lib/sidekiq_unique_jobs/reflectable.rb

@@ -7,11 +7,20 @@ module SidekiqUniqueJobs
   # @author Mikael Henriksson <mikael@mhenrixon.com>
   #
   module Reflectable
-
-
+    #
+    # Reflects on specific event
+    #
+    # @param [Symbol] reflection the reflected event
+    # @param [Array] args arguments to provide to reflector
+    #
+    # @return [void]
+    #
+    def reflect(reflection, *args)
+      SidekiqUniqueJobs.reflections.dispatch(reflection, *args)
       nil
     rescue UniqueJobsError => ex
       SidekiqUniqueJobs.logger.error(ex)
+      nil
     end
   end
 end

data/lib/sidekiq_unique_jobs/reflections.rb

@@ -42,6 +42,14 @@ module SidekiqUniqueJobs
       @reflections = {}
     end
 
+    #
+    # Dispatch a reflected event
+    #
+    # @param [reflection] reflection the reflected event
+    # @param [Array] args the arguments to provide to the block
+    #
+    # @return [void] <description>
+    #
     def dispatch(reflection, *args)
       if (block = @reflections[reflection])
         block.call(*args)

@@ -49,12 +57,15 @@ module SidekiqUniqueJobs
         if DEPRECATIONS.key?(reflection)
           replacement, removal_version = DEPRECATIONS[reflection]
           SidekiqUniqueJobs::Deprecation.warn(
-            "#{reflection} is deprecated and will be removed in version #{removal_version}.
+            "#{reflection} is deprecated and will be removed in version #{removal_version}." \
+            " Use #{replacement} instead.",
           )
         end
       elsif misconfigured?(reflection)
         raise NoSuchNotificationError, reflection
       end
+
+      nil
     end
 
     def configured?(reflection)

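dispatch looks up the block registered for a reflection name, warns when the name is deprecated, raises for unknown names, and now always returns nil. Applications hook into these events for logging or metrics; a sketch assuming the block-based SidekiqUniqueJobs.reflect registration described in the README (the :rescheduled event name is taken from the reschedule strategy shown earlier, other names may differ between versions):

    SidekiqUniqueJobs.reflect do |on|
      # Invoked by OnConflict::Reschedule when a conflicting job is re-enqueued.
      on.rescheduled do |item|
        Sidekiq.logger.info("rescheduled duplicate #{item['class']} (jid=#{item['jid']})")
      end
    end
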
data/lib/sidekiq_unique_jobs/script/caller.rb

@@ -54,13 +54,13 @@ module SidekiqUniqueJobs
       # Only used to reduce a little bit of duplication
       # @see call_script
       def do_call(file_name, conn, keys, argv)
-        argv = argv.dup.
-
-
-
-
-
-
+        argv = argv.dup.push(
+          now_f,
+          debug_lua,
+          max_history,
+          file_name,
+          redis_version,
+        )
         Script.execute(file_name, conn, keys: keys, argv: argv)
       end
 

data/lib/sidekiq_unique_jobs/server.rb

@@ -5,7 +5,7 @@ module SidekiqUniqueJobs
   #
   # @author Mikael Henriksson <mikael@mhenrixon.com>
   class Server
-    DEATH_HANDLER
+    DEATH_HANDLER = (lambda do |job, _ex|
       return unless (digest = job["lock_digest"])
 
       SidekiqUniqueJobs::Digests.new.delete_by_digest(digest)

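DEATH_HANDLER removes the lock digest when Sidekiq moves a job to the dead set, so a dead job cannot keep its uniqueness lock forever. The same cleanup can be done by hand for a known digest; a minimal sketch (the digest string is only an example):

    # Delete every Redis key belonging to one lock digest.
    SidekiqUniqueJobs::Digests.new.delete_by_digest("uniquejobs:9e9b5ce5d423d3ea470977004b50ff84")
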
@@ -25,6 +25,12 @@ module SidekiqUniqueJobs
       config.death_handlers << death_handler
     end
 
+    #
+    # Start the sidekiq unique jobs server process
+    #
+    #
+    # @return [void]
+    #
     def self.start
       SidekiqUniqueJobs::UpdateVersion.call
       SidekiqUniqueJobs::UpgradeLocks.call

@@ -32,6 +38,12 @@ module SidekiqUniqueJobs
       SidekiqUniqueJobs::Orphans::ReaperResurrector.start
     end
 
+    #
+    # Stop the sidekiq unique jobs server process
+    #
+    #
+    # @return [void]
+    #
     def self.stop
       SidekiqUniqueJobs::Orphans::Manager.stop
     end