sidekiq-unique-jobs 7.1.19 → 7.1.29

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of sidekiq-unique-jobs might be problematic. Click here for more details.

Files changed (42) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +144 -1
  3. data/README.md +7 -3
  4. data/lib/sidekiq_unique_jobs/batch_delete.rb +1 -0
  5. data/lib/sidekiq_unique_jobs/cli.rb +33 -8
  6. data/lib/sidekiq_unique_jobs/config.rb +5 -0
  7. data/lib/sidekiq_unique_jobs/constants.rb +1 -0
  8. data/lib/sidekiq_unique_jobs/digests.rb +2 -2
  9. data/lib/sidekiq_unique_jobs/exceptions.rb +3 -3
  10. data/lib/sidekiq_unique_jobs/expiring_digests.rb +14 -0
  11. data/lib/sidekiq_unique_jobs/key.rb +13 -8
  12. data/lib/sidekiq_unique_jobs/lock/until_executing.rb +4 -0
  13. data/lib/sidekiq_unique_jobs/lock.rb +18 -1
  14. data/lib/sidekiq_unique_jobs/lock_args.rb +18 -14
  15. data/lib/sidekiq_unique_jobs/lock_config.rb +4 -4
  16. data/lib/sidekiq_unique_jobs/lock_digest.rb +7 -7
  17. data/lib/sidekiq_unique_jobs/lock_timeout.rb +4 -4
  18. data/lib/sidekiq_unique_jobs/lock_ttl.rb +4 -4
  19. data/lib/sidekiq_unique_jobs/locksmith.rb +29 -9
  20. data/lib/sidekiq_unique_jobs/logging.rb +14 -0
  21. data/lib/sidekiq_unique_jobs/lua/lock.lua +15 -9
  22. data/lib/sidekiq_unique_jobs/lua/lock_until_expired.lua +92 -0
  23. data/lib/sidekiq_unique_jobs/lua/reap_orphans.lua +31 -3
  24. data/lib/sidekiq_unique_jobs/lua/unlock.lua +5 -0
  25. data/lib/sidekiq_unique_jobs/middleware/client.rb +2 -0
  26. data/lib/sidekiq_unique_jobs/middleware/server.rb +2 -0
  27. data/lib/sidekiq_unique_jobs/middleware.rb +4 -4
  28. data/lib/sidekiq_unique_jobs/on_conflict/reschedule.rb +3 -3
  29. data/lib/sidekiq_unique_jobs/options_with_fallback.rb +3 -3
  30. data/lib/sidekiq_unique_jobs/orphans/lua_reaper.rb +1 -1
  31. data/lib/sidekiq_unique_jobs/orphans/manager.rb +1 -14
  32. data/lib/sidekiq_unique_jobs/orphans/ruby_reaper.rb +50 -4
  33. data/lib/sidekiq_unique_jobs/sidekiq_unique_jobs.rb +10 -0
  34. data/lib/sidekiq_unique_jobs/sidekiq_worker_methods.rb +33 -24
  35. data/lib/sidekiq_unique_jobs/testing.rb +31 -13
  36. data/lib/sidekiq_unique_jobs/timer_task.rb +19 -77
  37. data/lib/sidekiq_unique_jobs/version.rb +1 -1
  38. data/lib/sidekiq_unique_jobs/web/helpers.rb +10 -0
  39. data/lib/sidekiq_unique_jobs/web.rb +22 -3
  40. data/lib/sidekiq_unique_jobs.rb +1 -0
  41. data/lib/tasks/changelog.rake +1 -1
  42. metadata +21 -5
@@ -32,6 +32,7 @@ module SidekiqUniqueJobs
32
32
  #
33
33
  # @return [Float] used to take into consideration the inaccuracy of redis timestamps
34
34
  CLOCK_DRIFT_FACTOR = 0.01
35
+ NETWORK_FACTOR = 0.04
35
36
 
36
37
  #
37
38
  # @!attribute [r] key
@@ -184,22 +185,23 @@ module SidekiqUniqueJobs
184
185
  #
185
186
  # @param [Sidekiq::RedisConnection, ConnectionPool] conn the redis connection
186
187
  # @param [Method] primed_method reference to the method to use for getting a primed token
188
 + # @param [nil, Integer, Float] wait the time to wait before timeout
187
189
  #
188
190
  # @yieldparam [string] job_id the sidekiq JID
189
191
  # @yieldreturn [void] whatever the calling block returns
190
192
  def lock!(conn, primed_method, wait = nil)
191
- return yield job_id if locked?(conn)
193
+ return yield if locked?(conn)
192
194
 
193
195
  enqueue(conn) do |queued_jid|
194
- reflect(:debug, item, queued_jid)
196
+ reflect(:debug, :queued, item, queued_jid)
195
197
 
196
198
  primed_method.call(conn, wait) do |primed_jid|
197
199
  reflect(:debug, :primed, item, primed_jid)
198
-
199
200
  locked_jid = call_script(:lock, key.to_a, argv, conn)
201
+
200
202
  if locked_jid
201
203
  reflect(:debug, :locked, item, locked_jid)
202
- return yield job_id
204
+ return yield
203
205
  end
204
206
  end
205
207
  end
@@ -239,9 +241,18 @@ module SidekiqUniqueJobs
239
241
  # @return [Object] whatever the block returns when lock was acquired
240
242
  #
241
243
  def primed_async(conn, wait = nil, &block)
244
+ timeout = (wait || config.timeout).to_i
245
+ timeout = 1 if timeout.zero?
246
+
247
+ brpoplpush_timeout = timeout
248
+ concurrent_timeout = add_drift(timeout)
249
+
250
+ reflect(:debug, :timeouts, item,
251
+ timeouts: { brpoplpush_timeout: brpoplpush_timeout, concurrent_timeout: concurrent_timeout })
252
+
242
253
  primed_jid = Concurrent::Promises
243
- .future(conn) { |red_con| pop_queued(red_con, wait) }
244
- .value(add_drift(wait || config.timeout))
254
+ .future(conn) { |red_con| pop_queued(red_con, timeout) }
255
+ .value
245
256
 
246
257
  handle_primed(primed_jid, &block)
247
258
  end
@@ -273,7 +284,7 @@ module SidekiqUniqueJobs
273
284
  #
274
285
  # @return [String] a previously enqueued token (now taken off the queue)
275
286
  #
276
- def pop_queued(conn, wait = nil)
287
+ def pop_queued(conn, wait = 1)
277
288
  wait ||= config.timeout if config.wait_for_lock?
278
289
 
279
290
  if wait.nil?
@@ -287,10 +298,15 @@ module SidekiqUniqueJobs
287
298
  # @api private
288
299
  #
289
300
  def brpoplpush(conn, wait)
301
+ # passing timeout 0 to brpoplpush causes it to block indefinitely
290
302
  raise InvalidArgument, "wait must be an integer" unless wait.is_a?(Integer)
303
+ return conn.brpoplpush(key.queued, key.primed, wait) if conn.class.to_s == "Redis::Namespace"
291
304
 
292
- # passing timeout 0 to brpoplpush causes it to block indefinitely
293
- conn.brpoplpush(key.queued, key.primed, timeout: wait)
305
+ if VersionCheck.satisfied?(redis_version, ">= 6.2.0") && conn.respond_to?(:blmove)
306
+ conn.blmove(key.queued, key.primed, "RIGHT", "LEFT", timeout: wait)
307
+ else
308
+ conn.brpoplpush(key.queued, key.primed, timeout: wait)
309
+ end
294
310
  end
295
311
 
296
312
  #
@@ -359,5 +375,9 @@ module SidekiqUniqueJobs
359
375
  TIME => now_f,
360
376
  )
361
377
  end
378
+
379
+ def redis_version
380
+ @redis_version ||= SidekiqUniqueJobs.config.redis_version
381
+ end
362
382
  end
363
383
  end
@@ -30,6 +30,8 @@ module SidekiqUniqueJobs
30
30
  # @yield [String, Exception] the message or exception to use for log message
31
31
  #
32
32
  def log_debug(message_or_exception = nil, item = nil, &block)
33
+ return unless logging?
34
+
33
35
  message = build_message(message_or_exception, item)
34
36
  logger.debug(message, &block)
35
37
  nil
@@ -45,6 +47,8 @@ module SidekiqUniqueJobs
45
47
  # @yield [String, Exception] the message or exception to use for log message
46
48
  #
47
49
  def log_info(message_or_exception = nil, item = nil, &block)
50
+ return unless logging?
51
+
48
52
  message = build_message(message_or_exception, item)
49
53
  logger.info(message, &block)
50
54
  nil
@@ -60,6 +64,8 @@ module SidekiqUniqueJobs
60
64
  # @yield [String, Exception] the message or exception to use for log message
61
65
  #
62
66
  def log_warn(message_or_exception = nil, item = nil, &block)
67
+ return unless logging?
68
+
63
69
  message = build_message(message_or_exception, item)
64
70
  logger.warn(message, &block)
65
71
  nil
@@ -75,6 +81,8 @@ module SidekiqUniqueJobs
75
81
  # @yield [String, Exception] the message or exception to use for log message
76
82
  #
77
83
  def log_error(message_or_exception = nil, item = nil, &block)
84
+ return unless logging?
85
+
78
86
  message = build_message(message_or_exception, item)
79
87
  logger.error(message, &block)
80
88
  nil
@@ -90,6 +98,8 @@ module SidekiqUniqueJobs
90
98
  # @yield [String, Exception] the message or exception to use for log message
91
99
  #
92
100
  def log_fatal(message_or_exception = nil, item = nil, &block)
101
+ return unless logging?
102
+
93
103
  message = build_message(message_or_exception, item)
94
104
  logger.fatal(message, &block)
95
105
 
@@ -218,5 +228,9 @@ module SidekiqUniqueJobs
218
228
 
219
229
  yield
220
230
  end
231
+
232
+ def logging?
233
+ SidekiqUniqueJobs.logging?
234
+ end
221
235
  end
222
236
  end
@@ -1,11 +1,12 @@
1
1
  -------- BEGIN keys ---------
2
- local digest = KEYS[1]
3
- local queued = KEYS[2]
4
- local primed = KEYS[3]
5
- local locked = KEYS[4]
6
- local info = KEYS[5]
7
- local changelog = KEYS[6]
8
- local digests = KEYS[7]
2
+ local digest = KEYS[1]
3
+ local queued = KEYS[2]
4
+ local primed = KEYS[3]
5
+ local locked = KEYS[4]
6
+ local info = KEYS[5]
7
+ local changelog = KEYS[6]
8
+ local digests = KEYS[7]
9
+ local expiring_digests = KEYS[8]
9
10
  -------- END keys ---------
10
11
 
11
12
 
@@ -57,8 +58,13 @@ if limit_exceeded then
57
58
  return nil
58
59
  end
59
60
 
60
- log_debug("ZADD", digests, current_time, digest)
61
- redis.call("ZADD", digests, current_time, digest)
61
+ if lock_type == "until_expired" and pttl and pttl > 0 then
62
+ log_debug("ZADD", expiring_digests, current_time + pttl, digest)
63
+ redis.call("ZADD", expiring_digests, current_time + pttl, digest)
64
+ else
65
+ log_debug("ZADD", digests, current_time, digest)
66
+ redis.call("ZADD", digests, current_time, digest)
67
+ end
62
68
 
63
69
  log_debug("HSET", locked, job_id, current_time)
64
70
  redis.call("HSET", locked, job_id, current_time)
@@ -0,0 +1,92 @@
1
+ -------- BEGIN keys ---------
2
+ local digest = KEYS[1]
3
+ local queued = KEYS[2]
4
+ local primed = KEYS[3]
5
+ local locked = KEYS[4]
6
+ local info = KEYS[5]
7
+ local changelog = KEYS[6]
8
+ local digests = KEYS[7]
9
+ local expiring_digests = KEYS[8]
10
+ -------- END keys ---------
11
+
12
+
13
+ -------- BEGIN lock arguments ---------
14
+ local job_id = ARGV[1]
15
+ local pttl = tonumber(ARGV[2])
16
+ local lock_type = ARGV[3]
17
+ local limit = tonumber(ARGV[4])
18
+ -------- END lock arguments -----------
19
+
20
+
21
+ -------- BEGIN injected arguments --------
22
+ local current_time = tonumber(ARGV[5])
23
+ local debug_lua = ARGV[6] == "true"
24
+ local max_history = tonumber(ARGV[7])
25
+ local script_name = tostring(ARGV[8]) .. ".lua"
26
+ local redisversion = ARGV[9]
27
+ --------- END injected arguments ---------
28
+
29
+
30
+ -------- BEGIN local functions --------
31
+ <%= include_partial "shared/_common.lua" %>
32
+ ---------- END local functions ----------
33
+
34
+
35
+ --------- BEGIN lock.lua ---------
36
+ log_debug("BEGIN lock digest:", digest, "job_id:", job_id)
37
+
38
+ if redis.call("HEXISTS", locked, job_id) == 1 then
39
+ log_debug(locked, "already locked with job_id:", job_id)
40
+ log("Duplicate")
41
+
42
+ log_debug("LREM", queued, -1, job_id)
43
+ redis.call("LREM", queued, -1, job_id)
44
+
45
+ log_debug("LREM", primed, 1, job_id)
46
+ redis.call("LREM", primed, 1, job_id)
47
+
48
+ return job_id
49
+ end
50
+
51
+ local locked_count = redis.call("HLEN", locked)
52
+ local within_limit = limit > locked_count
53
+ local limit_exceeded = not within_limit
54
+
55
+ if limit_exceeded then
56
+ log_debug("Limit exceeded:", digest, "(", locked_count, "of", limit, ")")
57
+ log("Limited")
58
+ return nil
59
+ end
60
+
61
+ log_debug("ZADD", expiring_digests, current_time + pttl, digest)
62
+ redis.call("ZADD", expiring_digests, current_time + pttl, digest)
63
+
64
+ log_debug("HSET", locked, job_id, current_time)
65
+ redis.call("HSET", locked, job_id, current_time)
66
+
67
+ log_debug("LREM", queued, -1, job_id)
68
+ redis.call("LREM", queued, -1, job_id)
69
+
70
+ log_debug("LREM", primed, 1, job_id)
71
+ redis.call("LREM", primed, 1, job_id)
72
+
73
+ -- The Sidekiq client sets pttl
74
+ log_debug("PEXPIRE", digest, pttl)
75
+ redis.call("PEXPIRE", digest, pttl)
76
+
77
+ log_debug("PEXPIRE", locked, pttl)
78
+ redis.call("PEXPIRE", locked, pttl)
79
+
80
+ log_debug("PEXPIRE", info, pttl)
81
+ redis.call("PEXPIRE", info, pttl)
82
+
83
+ log_debug("PEXPIRE", queued, 1000)
84
+ redis.call("PEXPIRE", queued, 1000)
85
+
86
+ log_debug("PEXPIRE", primed, 1000)
87
+ redis.call("PEXPIRE", primed, 1000)
88
+
89
+ log("Locked")
90
+ log_debug("END lock digest:", digest, "job_id:", job_id)
91
+ return job_id
92
+ ---------- END lock.lua ----------
@@ -1,9 +1,10 @@
1
1
  redis.replicate_commands()
2
2
 
3
3
  -------- BEGIN keys ---------
4
- local digests_set = KEYS[1]
5
- local schedule_set = KEYS[2]
6
- local retry_set = KEYS[3]
4
+ local digests_set = KEYS[1]
5
+ local expiring_digests_set = KEYS[2]
6
+ local schedule_set = KEYS[3]
7
+ local retry_set = KEYS[4]
7
8
  -------- END keys ---------
8
9
 
9
10
  -------- BEGIN argv ---------
@@ -90,5 +91,32 @@ repeat
90
91
  index = index + per
91
92
  until index >= total or del_count >= reaper_count
92
93
 
94
+ if del_count < reaper_count then
95
+ index = 0
96
+ total = redis.call("ZCOUNT", expiring_digests_set, 0, current_time)
97
+ repeat
98
+ local digests = redis.call("ZRANGEBYSCORE", expiring_digests_set, 0, current_time, "LIMIT", index, index + per -1)
99
+
100
+ for _, digest in pairs(digests) do
101
+ local queued = digest .. ":QUEUED"
102
+ local primed = digest .. ":PRIMED"
103
+ local locked = digest .. ":LOCKED"
104
+ local info = digest .. ":INFO"
105
+ local run_digest = digest .. ":RUN"
106
+ local run_queued = digest .. ":RUN:QUEUED"
107
+ local run_primed = digest .. ":RUN:PRIMED"
108
+ local run_locked = digest .. ":RUN:LOCKED"
109
+ local run_info = digest .. ":RUN:INFO"
110
+
111
+ redis.call(del_cmd, digest, queued, primed, locked, info, run_digest, run_queued, run_primed, run_locked, run_info)
112
+
113
+ redis.call("ZREM", expiring_digests_set, digest)
114
+ del_count = del_count + 1
115
+ end
116
+
117
+ index = index + per
118
+ until index >= total or del_count >= reaper_count
119
+ end
120
+
93
121
  log_debug("END")
94
122
  return del_count
@@ -85,6 +85,11 @@ if locked_count and locked_count < 1 then
85
85
  redis.call(del_cmd, locked)
86
86
  end
87
87
 
88
+ if redis.call("LLEN", primed) == 0 then
89
+ log_debug(del_cmd, primed)
90
+ redis.call(del_cmd, primed)
91
+ end
92
+
88
93
  if limit and limit <= 1 and locked_count and locked_count <= 1 then
89
94
  log_debug("ZREM", digests, digest)
90
95
  redis.call("ZREM", digests, digest)
@@ -6,6 +6,8 @@ module SidekiqUniqueJobs
6
6
  #
7
7
  # @author Mikael Henriksson <mikael@mhenrixon.com>
8
8
  class Client
9
+ include Sidekiq::ClientMiddleware if defined?(Sidekiq::ClientMiddleware)
10
+
9
11
  # prepend "SidekiqUniqueJobs::Middleware"
10
12
  # @!parse prepends SidekiqUniqueJobs::Middleware
11
13
  prepend SidekiqUniqueJobs::Middleware
@@ -6,6 +6,8 @@ module SidekiqUniqueJobs
6
6
  #
7
7
  # @author Mikael Henriksson <mikael@mhenrixon.com>
8
8
  class Server
9
+ include Sidekiq::ServerMiddleware if defined?(Sidekiq::ServerMiddleware)
10
+
9
11
  # prepend "SidekiqUniqueJobs::Middleware"
10
12
  # @!parse prepends SidekiqUniqueJobs::Middleware
11
13
  prepend SidekiqUniqueJobs::Middleware
@@ -29,10 +29,10 @@ module SidekiqUniqueJobs
29
29
  # @yieldparam [<type>] if <description>
30
30
  # @yieldreturn [<type>] <describe what yield should return>
31
31
  def call(worker_class, item, queue, redis_pool = nil)
32
- @worker_class = worker_class
33
- @item = item
34
- @queue = queue
35
- @redis_pool = redis_pool
32
+ @item = item
33
+ @queue = queue
34
+ @redis_pool = redis_pool
35
+ self.job_class = worker_class
36
36
  return yield if unique_disabled?
37
37
 
38
38
  SidekiqUniqueJobs::Job.prepare(item) unless item[LOCK_DIGEST]
@@ -14,14 +14,14 @@ module SidekiqUniqueJobs
14
14
  # @param [Hash] item sidekiq job hash
15
15
  def initialize(item, redis_pool = nil)
16
16
  super(item, redis_pool)
17
- @worker_class = item[CLASS]
17
+ self.job_class = item[CLASS]
18
18
  end
19
19
 
20
20
  # Create a new job from the current one.
21
21
  # This will mess up sidekiq stats because a new job is created
22
22
  def call
23
- if sidekiq_worker_class?
24
- if worker_class.set(queue: item["queue"].to_sym).perform_in(5, *item[ARGS])
23
+ if sidekiq_job_class?
24
+ if job_class.set(queue: item["queue"].to_sym).perform_in(5, *item[ARGS])
25
25
  reflect(:rescheduled, item)
26
26
  else
27
27
  reflect(:reschedule_failed, item)
@@ -6,7 +6,7 @@ module SidekiqUniqueJobs
6
6
  # Requires the following methods to be defined in the including class
7
7
  # 1. item (required)
8
8
  # 2. options (can be nil)
9
- # 3. worker_class (required, can be anything)
9
+ # 3. job_class (required, can be anything)
10
10
  # @author Mikael Henriksson <mikael@mhenrixon.com>
11
11
  module OptionsWithFallback
12
12
  def self.included(base)
@@ -69,8 +69,8 @@ module SidekiqUniqueJobs
69
69
  #
70
70
  def options
71
71
  @options ||= begin
72
- opts = default_worker_options.dup
73
- opts.merge!(worker_options) if sidekiq_worker_class?
72
+ opts = default_job_options.dup
73
+ opts.merge!(job_options) if sidekiq_job_class?
74
74
  (opts || {}).stringify_keys
75
75
  end
76
76
  end
@@ -20,7 +20,7 @@ module SidekiqUniqueJobs
20
20
  call_script(
21
21
  :reap_orphans,
22
22
  conn,
23
- keys: [DIGESTS, SCHEDULE, RETRY, PROCESSES],
23
+ keys: [DIGESTS, EXPIRING_DIGESTS, SCHEDULE, RETRY, PROCESSES],
24
24
  argv: [reaper_count, (Time.now - reaper_timeout).to_f],
25
25
  )
26
26
  end
@@ -110,13 +110,7 @@ module SidekiqUniqueJobs
110
110
  # @return [Hash]
111
111
  #
112
112
  def timer_task_options
113
- timer_task_options = { run_now: true, execution_interval: reaper_interval }
114
-
115
- if VersionCheck.satisfied?(::Concurrent::VERSION, "< 1.1.10")
116
- timer_task_options[:timeout_interval] = reaper_timeout
117
- end
118
-
119
- timer_task_options
113
+ { run_now: true, execution_interval: reaper_interval }
120
114
  end
121
115
 
122
116
  #
@@ -133,13 +127,6 @@ module SidekiqUniqueJobs
133
127
  SidekiqUniqueJobs.config.reaper_interval
134
128
  end
135
129
 
136
- #
137
- # @see SidekiqUniqueJobs::Config#reaper_timeout
138
- #
139
- def reaper_timeout
140
- SidekiqUniqueJobs.config.reaper_timeout
141
- end
142
-
143
130
  #
144
131
  # A context to use for all log entries
145
132
  #
@@ -11,6 +11,8 @@ module SidekiqUniqueJobs
11
11
  #
12
12
  # rubocop:disable Metrics/ClassLength
13
13
  class RubyReaper < Reaper
14
+ include SidekiqUniqueJobs::Timing
15
+
14
16
  #
15
17
  # @return [String] the suffix for :RUN locks
16
18
  RUN_SUFFIX = ":RUN"
@@ -30,6 +32,22 @@ module SidekiqUniqueJobs
30
32
  # @return [Redis::SortedSet] the Sidekiq RetrySet
31
33
  attr_reader :retried
32
34
 
35
+ #
36
+ # @!attribute [r] start_time
37
 + # @return [Time] The timestamp this execution started represented as Time (used for locks)
38
+ attr_reader :start_time
39
+
40
+ #
41
+ # @!attribute [r] start_time
42
+ # @return [Integer] The clock stamp this execution started represented as integer
43
+ # (used for redis compatibility as it is more accurate than time)
44
+ attr_reader :start_source
45
+
46
+ #
47
+ # @!attribute [r] timeout_ms
48
+ # @return [Integer] The allowed ms before timeout
49
+ attr_reader :timeout_ms
50
+
33
51
  #
34
52
  # Initialize a new instance of DeleteOrphans
35
53
  #
@@ -37,9 +55,12 @@ module SidekiqUniqueJobs
37
55
  #
38
56
  def initialize(conn)
39
57
  super(conn)
40
- @digests = SidekiqUniqueJobs::Digests.new
41
- @scheduled = Redis::SortedSet.new(SCHEDULE)
42
- @retried = Redis::SortedSet.new(RETRY)
58
+ @digests = SidekiqUniqueJobs::Digests.new
59
+ @scheduled = Redis::SortedSet.new(SCHEDULE)
60
+ @retried = Redis::SortedSet.new(RETRY)
61
+ @start_time = Time.now
62
+ @start_source = time_source.call
63
+ @timeout_ms = SidekiqUniqueJobs.config.reaper_timeout * 1000
43
64
  end
44
65
 
45
66
  #
@@ -51,16 +72,27 @@ module SidekiqUniqueJobs
51
72
  def call
52
73
  return if queues_very_full?
53
74
 
75
+ BatchDelete.call(expired_digests, conn)
54
76
  BatchDelete.call(orphans, conn)
55
77
  end
56
78
 
79
+ def expired_digests
80
+ max_score = (start_time - reaper_timeout).to_f
81
+
82
+ if VersionCheck.satisfied?(redis_version, ">= 6.2.0") && VersionCheck.satisfied?(::Redis::VERSION, ">= 4.6.0")
83
+ conn.zrange(EXPIRING_DIGESTS, 0, max_score, byscore: true)
84
+ else
85
+ conn.zrangebyscore(EXPIRING_DIGESTS, 0, max_score)
86
+ end
87
+ end
88
+
57
89
  #
58
90
  # Find orphaned digests
59
91
  #
60
92
  #
61
93
  # @return [Array<String>] an array of orphaned digests
62
94
  #
63
- def orphans # rubocop:disable Metrics/MethodLength
95
+ def orphans # rubocop:disable Metrics/MethodLength, Metrics/CyclomaticComplexity
64
96
  page = 0
65
97
  per = reaper_count * 2
66
98
  orphans = []
@@ -68,12 +100,14 @@ module SidekiqUniqueJobs
68
100
 
69
101
  while results.size.positive?
70
102
  results.each do |digest|
103
+ break if timeout?
71
104
  next if belongs_to_job?(digest)
72
105
 
73
106
  orphans << digest
74
107
  break if orphans.size >= reaper_count
75
108
  end
76
109
 
110
+ break if timeout?
77
111
  break if orphans.size >= reaper_count
78
112
 
79
113
  page += 1
@@ -83,6 +117,14 @@ module SidekiqUniqueJobs
83
117
  orphans
84
118
  end
85
119
 
120
+ def timeout?
121
+ elapsed_ms >= timeout_ms
122
+ end
123
+
124
+ def elapsed_ms
125
+ time_source.call - start_source
126
+ end
127
+
86
128
  #
87
129
  # Checks if the digest has a matching job.
88
130
  # 1. It checks the scheduled set
@@ -205,6 +247,7 @@ module SidekiqUniqueJobs
205
247
 
206
248
  loop do
207
249
  range_start = (page * page_size) - deleted_size
250
+
208
251
  range_end = range_start + page_size - 1
209
252
  entries = conn.lrange(queue_key, range_start, range_end)
210
253
  page += 1
@@ -214,6 +257,9 @@ module SidekiqUniqueJobs
214
257
  entries.each(&block)
215
258
 
216
259
  deleted_size = initial_size - conn.llen(queue_key)
260
+
261
+ # The queue is growing, not shrinking, just keep looping
262
+ deleted_size = 0 if deleted_size.negative?
217
263
  end
218
264
  end
219
265
 
@@ -71,6 +71,16 @@ module SidekiqUniqueJobs # rubocop:disable Metrics/ModuleLength
71
71
  config.logger = other
72
72
  end
73
73
 
74
+ #
75
+ # Check if logging is enabled
76
+ #
77
+ #
78
+ # @return [true, false]
79
+ #
80
+ def logging?
81
+ config.logger_enabled
82
+ end
83
+
74
84
  #
75
85
  # Temporarily use another configuration and reset to the old config after yielding
76
86
  #