sidekiq-unique-jobs 7.1.8 → 7.1.29

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of sidekiq-unique-jobs might be problematic. Click here for more details.

Files changed (53)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +283 -2
  3. data/README.md +17 -14
  4. data/lib/sidekiq_unique_jobs/batch_delete.rb +7 -6
  5. data/lib/sidekiq_unique_jobs/changelog.rb +3 -3
  6. data/lib/sidekiq_unique_jobs/cli.rb +33 -8
  7. data/lib/sidekiq_unique_jobs/config.rb +5 -0
  8. data/lib/sidekiq_unique_jobs/constants.rb +1 -0
  9. data/lib/sidekiq_unique_jobs/digests.rb +5 -5
  10. data/lib/sidekiq_unique_jobs/exceptions.rb +3 -3
  11. data/lib/sidekiq_unique_jobs/expiring_digests.rb +14 -0
  12. data/lib/sidekiq_unique_jobs/key.rb +13 -8
  13. data/lib/sidekiq_unique_jobs/lock/until_executed.rb +5 -1
  14. data/lib/sidekiq_unique_jobs/lock/until_executing.rb +4 -0
  15. data/lib/sidekiq_unique_jobs/lock/until_expired.rb +3 -1
  16. data/lib/sidekiq_unique_jobs/lock/while_executing.rb +4 -1
  17. data/lib/sidekiq_unique_jobs/lock.rb +27 -10
  18. data/lib/sidekiq_unique_jobs/lock_args.rb +19 -15
  19. data/lib/sidekiq_unique_jobs/lock_config.rb +6 -6
  20. data/lib/sidekiq_unique_jobs/lock_digest.rb +7 -7
  21. data/lib/sidekiq_unique_jobs/lock_info.rb +2 -2
  22. data/lib/sidekiq_unique_jobs/lock_timeout.rb +4 -4
  23. data/lib/sidekiq_unique_jobs/lock_ttl.rb +4 -4
  24. data/lib/sidekiq_unique_jobs/locksmith.rb +30 -9
  25. data/lib/sidekiq_unique_jobs/logging.rb +14 -0
  26. data/lib/sidekiq_unique_jobs/lua/lock.lua +15 -9
  27. data/lib/sidekiq_unique_jobs/lua/lock_until_expired.lua +92 -0
  28. data/lib/sidekiq_unique_jobs/lua/reap_orphans.lua +31 -3
  29. data/lib/sidekiq_unique_jobs/lua/shared/_common.lua +1 -1
  30. data/lib/sidekiq_unique_jobs/lua/unlock.lua +11 -7
  31. data/lib/sidekiq_unique_jobs/middleware/client.rb +2 -0
  32. data/lib/sidekiq_unique_jobs/middleware/server.rb +2 -0
  33. data/lib/sidekiq_unique_jobs/middleware.rb +4 -4
  34. data/lib/sidekiq_unique_jobs/on_conflict/reject.rb +4 -4
  35. data/lib/sidekiq_unique_jobs/on_conflict/reschedule.rb +3 -3
  36. data/lib/sidekiq_unique_jobs/options_with_fallback.rb +3 -3
  37. data/lib/sidekiq_unique_jobs/orphans/lua_reaper.rb +1 -1
  38. data/lib/sidekiq_unique_jobs/orphans/manager.rb +3 -10
  39. data/lib/sidekiq_unique_jobs/orphans/ruby_reaper.rb +96 -13
  40. data/lib/sidekiq_unique_jobs/redis/string.rb +3 -1
  41. data/lib/sidekiq_unique_jobs/sidekiq_unique_ext.rb +1 -1
  42. data/lib/sidekiq_unique_jobs/sidekiq_unique_jobs.rb +10 -0
  43. data/lib/sidekiq_unique_jobs/sidekiq_worker_methods.rb +40 -21
  44. data/lib/sidekiq_unique_jobs/testing.rb +53 -21
  45. data/lib/sidekiq_unique_jobs/timer_task.rb +266 -45
  46. data/lib/sidekiq_unique_jobs/timing.rb +1 -1
  47. data/lib/sidekiq_unique_jobs/upgrade_locks.rb +6 -6
  48. data/lib/sidekiq_unique_jobs/version.rb +1 -1
  49. data/lib/sidekiq_unique_jobs/web/helpers.rb +11 -1
  50. data/lib/sidekiq_unique_jobs/web.rb +22 -3
  51. data/lib/sidekiq_unique_jobs.rb +1 -0
  52. data/lib/tasks/changelog.rake +1 -1
  53. metadata +23 -11
@@ -1,9 +1,10 @@
1
1
  redis.replicate_commands()
2
2
 
3
3
  -------- BEGIN keys ---------
4
- local digests_set = KEYS[1]
5
- local schedule_set = KEYS[2]
6
- local retry_set = KEYS[3]
4
+ local digests_set = KEYS[1]
5
+ local expiring_digests_set = KEYS[2]
6
+ local schedule_set = KEYS[3]
7
+ local retry_set = KEYS[4]
7
8
  -------- END keys ---------
8
9
 
9
10
  -------- BEGIN argv ---------
@@ -90,5 +91,32 @@ repeat
90
91
  index = index + per
91
92
  until index >= total or del_count >= reaper_count
92
93
 
94
+ if del_count < reaper_count then
95
+ index = 0
96
+ total = redis.call("ZCOUNT", expiring_digests_set, 0, current_time)
97
+ repeat
98
+ local digests = redis.call("ZRANGEBYSCORE", expiring_digests_set, 0, current_time, "LIMIT", index, index + per -1)
99
+
100
+ for _, digest in pairs(digests) do
101
+ local queued = digest .. ":QUEUED"
102
+ local primed = digest .. ":PRIMED"
103
+ local locked = digest .. ":LOCKED"
104
+ local info = digest .. ":INFO"
105
+ local run_digest = digest .. ":RUN"
106
+ local run_queued = digest .. ":RUN:QUEUED"
107
+ local run_primed = digest .. ":RUN:PRIMED"
108
+ local run_locked = digest .. ":RUN:LOCKED"
109
+ local run_info = digest .. ":RUN:INFO"
110
+
111
+ redis.call(del_cmd, digest, queued, primed, locked, info, run_digest, run_queued, run_primed, run_locked, run_info)
112
+
113
+ redis.call("ZREM", expiring_digests_set, digest)
114
+ del_count = del_count + 1
115
+ end
116
+
117
+ index = index + per
118
+ until index >= total or del_count >= reaper_count
119
+ end
120
+
93
121
  log_debug("END")
94
122
  return del_count
@@ -31,7 +31,7 @@ local function log(message, prev_jid)
31
31
  log_debug("ZADD", changelog, current_time, entry);
32
32
  redis.call("ZADD", changelog, current_time, entry);
33
33
  local total_entries = redis.call("ZCARD", changelog)
34
- local removed_entries = redis.call("ZREMRANGEBYRANK", changelog, max_history, -1)
34
+ local removed_entries = redis.call("ZREMRANGEBYRANK", changelog, 0, -1 * max_history)
35
35
  if removed_entries > 0 then
36
36
  log_debug("Removing", removed_entries , "entries from changelog (total entries", total_entries, "exceeds max_history:", max_history ..")");
37
37
  end
@@ -65,18 +65,12 @@ redis.call("LREM", queued, -1, job_id)
65
65
  log_debug("LREM", primed, -1, job_id)
66
66
  redis.call("LREM", primed, -1, job_id)
67
67
 
68
- if limit and limit <= 1 and locked_count and locked_count <= 1 then
69
- log_debug("ZREM", digests, digest)
70
- redis.call("ZREM", digests, digest)
71
- end
72
-
73
68
  local redis_version = toversion(redisversion)
74
69
  local del_cmd = "DEL"
75
70
 
76
71
  if tonumber(redis_version["major"]) >= 4 then del_cmd = "UNLINK"; end
77
72
 
78
73
  if lock_type ~= "until_expired" then
79
-
80
74
  log_debug(del_cmd, digest, info)
81
75
  redis.call(del_cmd, digest, info)
82
76
 
@@ -86,11 +80,21 @@ end
86
80
 
87
81
  local locked_count = redis.call("HLEN", locked)
88
82
 
89
- if tonumber(locked_count) < 1 then
83
+ if locked_count and locked_count < 1 then
90
84
  log_debug(del_cmd, locked)
91
85
  redis.call(del_cmd, locked)
92
86
  end
93
87
 
88
+ if redis.call("LLEN", primed) == 0 then
89
+ log_debug(del_cmd, primed)
90
+ redis.call(del_cmd, primed)
91
+ end
92
+
93
+ if limit and limit <= 1 and locked_count and locked_count <= 1 then
94
+ log_debug("ZREM", digests, digest)
95
+ redis.call("ZREM", digests, digest)
96
+ end
97
+
94
98
  log_debug("LPUSH", queued, "1")
95
99
  redis.call("LPUSH", queued, "1")
96
100
 
@@ -6,6 +6,8 @@ module SidekiqUniqueJobs
6
6
  #
7
7
  # @author Mikael Henriksson <mikael@mhenrixon.com>
8
8
  class Client
9
+ include Sidekiq::ClientMiddleware if defined?(Sidekiq::ClientMiddleware)
10
+
9
11
  # prepend "SidekiqUniqueJobs::Middleware"
10
12
  # @!parse prepends SidekiqUniqueJobs::Middleware
11
13
  prepend SidekiqUniqueJobs::Middleware
@@ -6,6 +6,8 @@ module SidekiqUniqueJobs
6
6
  #
7
7
  # @author Mikael Henriksson <mikael@mhenrixon.com>
8
8
  class Server
9
+ include Sidekiq::ServerMiddleware if defined?(Sidekiq::ServerMiddleware)
10
+
9
11
  # prepend "SidekiqUniqueJobs::Middleware"
10
12
  # @!parse prepends SidekiqUniqueJobs::Middleware
11
13
  prepend SidekiqUniqueJobs::Middleware
@@ -29,10 +29,10 @@ module SidekiqUniqueJobs
29
29
  # @yieldparam [<type>] if <description>
30
30
  # @yieldreturn [<type>] <describe what yield should return>
31
31
  def call(worker_class, item, queue, redis_pool = nil)
32
- @worker_class = worker_class
33
- @item = item
34
- @queue = queue
35
- @redis_pool = redis_pool
32
+ @item = item
33
+ @queue = queue
34
+ @redis_pool = redis_pool
35
+ self.job_class = worker_class
36
36
  return yield if unique_disabled?
37
37
 
38
38
  SidekiqUniqueJobs::Job.prepare(item) unless item[LOCK_DIGEST]
@@ -96,10 +96,10 @@ module SidekiqUniqueJobs
96
96
  #
97
97
  def push_to_deadset
98
98
  redis do |conn|
99
- conn.multi do
100
- conn.zadd("dead", now_f, payload)
101
- conn.zremrangebyscore("dead", "-inf", now_f - Sidekiq::DeadSet.timeout)
102
- conn.zremrangebyrank("dead", 0, -Sidekiq::DeadSet.max_jobs)
99
+ conn.multi do |pipeline|
100
+ pipeline.zadd("dead", now_f, payload)
101
+ pipeline.zremrangebyscore("dead", "-inf", now_f - Sidekiq::DeadSet.timeout)
102
+ pipeline.zremrangebyrank("dead", 0, -Sidekiq::DeadSet.max_jobs)
103
103
  end
104
104
  end
105
105
  end
@@ -14,14 +14,14 @@ module SidekiqUniqueJobs
14
14
  # @param [Hash] item sidekiq job hash
15
15
  def initialize(item, redis_pool = nil)
16
16
  super(item, redis_pool)
17
- @worker_class = item[CLASS]
17
+ self.job_class = item[CLASS]
18
18
  end
19
19
 
20
20
  # Create a new job from the current one.
21
21
  # This will mess up sidekiq stats because a new job is created
22
22
  def call
23
- if sidekiq_worker_class?
24
- if worker_class.perform_in(5, *item[ARGS])
23
+ if sidekiq_job_class?
24
+ if job_class.set(queue: item["queue"].to_sym).perform_in(5, *item[ARGS])
25
25
  reflect(:rescheduled, item)
26
26
  else
27
27
  reflect(:reschedule_failed, item)
@@ -6,7 +6,7 @@ module SidekiqUniqueJobs
6
6
  # Requires the following methods to be defined in the including class
7
7
  # 1. item (required)
8
8
  # 2. options (can be nil)
9
- # 3. worker_class (required, can be anything)
9
+ # 3. job_class (required, can be anything)
10
10
  # @author Mikael Henriksson <mikael@mhenrixon.com>
11
11
  module OptionsWithFallback
12
12
  def self.included(base)
@@ -69,8 +69,8 @@ module SidekiqUniqueJobs
69
69
  #
70
70
  def options
71
71
  @options ||= begin
72
- opts = default_worker_options.dup
73
- opts.merge!(worker_options) if sidekiq_worker_class?
72
+ opts = default_job_options.dup
73
+ opts.merge!(job_options) if sidekiq_job_class?
74
74
  (opts || {}).stringify_keys
75
75
  end
76
76
  end
@@ -20,7 +20,7 @@ module SidekiqUniqueJobs
20
20
  call_script(
21
21
  :reap_orphans,
22
22
  conn,
23
- keys: [DIGESTS, SCHEDULE, RETRY, PROCESSES],
23
+ keys: [DIGESTS, EXPIRING_DIGESTS, SCHEDULE, RETRY, PROCESSES],
24
24
  argv: [reaper_count, (Time.now - reaper_timeout).to_f],
25
25
  )
26
26
  end
@@ -1,5 +1,7 @@
1
1
  # frozen_string_literal: true
2
2
 
3
+ require "concurrent/version"
4
+
3
5
  module SidekiqUniqueJobs
4
6
  module Orphans
5
7
  #
@@ -108,9 +110,7 @@ module SidekiqUniqueJobs
108
110
  # @return [Hash]
109
111
  #
110
112
  def timer_task_options
111
- { run_now: true,
112
- execution_interval: reaper_interval,
113
- timeout_interval: reaper_timeout }
113
+ { run_now: true, execution_interval: reaper_interval }
114
114
  end
115
115
 
116
116
  #
@@ -127,13 +127,6 @@ module SidekiqUniqueJobs
127
127
  SidekiqUniqueJobs.config.reaper_interval
128
128
  end
129
129
 
130
- #
131
- # @see SidekiqUniqueJobs::Config#reaper_timeout
132
- #
133
- def reaper_timeout
134
- SidekiqUniqueJobs.config.reaper_timeout
135
- end
136
-
137
130
  #
138
131
  # A context to use for all log entries
139
132
  #
@@ -9,11 +9,17 @@ module SidekiqUniqueJobs
9
9
  #
10
10
  # @author Mikael Henriksson <mikael@mhenrixon.com>
11
11
  #
12
+ # rubocop:disable Metrics/ClassLength
12
13
  class RubyReaper < Reaper
14
+ include SidekiqUniqueJobs::Timing
15
+
13
16
  #
14
17
  # @return [String] the suffix for :RUN locks
15
18
  RUN_SUFFIX = ":RUN"
16
19
  #
20
+ # @return [Integer] the maximum combined length of sidekiq queues for running the reaper
21
+ MAX_QUEUE_LENGTH = 1000
22
+ #
17
23
  # @!attribute [r] digests
18
24
  # @return [SidekiqUniqueJobs::Digests] digest collection
19
25
  attr_reader :digests
@@ -26,6 +32,22 @@ module SidekiqUniqueJobs
26
32
  # @return [Redis::SortedSet] the Sidekiq RetrySet
27
33
  attr_reader :retried
28
34
 
35
+ #
36
+ # @!attribute [r] start_time
37
+ # @return [Integer] The timestamp this execution started represented as Time (used for locks)
38
+ attr_reader :start_time
39
+
40
+ #
41
+ # @!attribute [r] start_source
42
+ # @return [Integer] The clock stamp this execution started represented as integer
43
+ # (used for redis compatibility as it is more accurate than time)
44
+ attr_reader :start_source
45
+
46
+ #
47
+ # @!attribute [r] timeout_ms
48
+ # @return [Integer] The allowed ms before timeout
49
+ attr_reader :timeout_ms
50
+
29
51
  #
30
52
  # Initialize a new instance of DeleteOrphans
31
53
  #
@@ -33,9 +55,12 @@ module SidekiqUniqueJobs
33
55
  #
34
56
  def initialize(conn)
35
57
  super(conn)
36
- @digests = SidekiqUniqueJobs::Digests.new
37
- @scheduled = Redis::SortedSet.new(SCHEDULE)
38
- @retried = Redis::SortedSet.new(RETRY)
58
+ @digests = SidekiqUniqueJobs::Digests.new
59
+ @scheduled = Redis::SortedSet.new(SCHEDULE)
60
+ @retried = Redis::SortedSet.new(RETRY)
61
+ @start_time = Time.now
62
+ @start_source = time_source.call
63
+ @timeout_ms = SidekiqUniqueJobs.config.reaper_timeout * 1000
39
64
  end
40
65
 
41
66
  #
@@ -45,22 +70,59 @@ module SidekiqUniqueJobs
45
70
  # @return [Integer] the number of reaped locks
46
71
  #
47
72
  def call
73
+ return if queues_very_full?
74
+
75
+ BatchDelete.call(expired_digests, conn)
48
76
  BatchDelete.call(orphans, conn)
49
77
  end
50
78
 
79
+ def expired_digests
80
+ max_score = (start_time - reaper_timeout).to_f
81
+
82
+ if VersionCheck.satisfied?(redis_version, ">= 6.2.0") && VersionCheck.satisfied?(::Redis::VERSION, ">= 4.6.0")
83
+ conn.zrange(EXPIRING_DIGESTS, 0, max_score, byscore: true)
84
+ else
85
+ conn.zrangebyscore(EXPIRING_DIGESTS, 0, max_score)
86
+ end
87
+ end
88
+
51
89
  #
52
90
  # Find orphaned digests
53
91
  #
54
92
  #
55
93
  # @return [Array<String>] an array of orphaned digests
56
94
  #
57
- def orphans
58
- conn.zrevrange(digests.key, 0, -1).each_with_object([]) do |digest, memo|
59
- next if belongs_to_job?(digest)
95
+ def orphans # rubocop:disable Metrics/MethodLength, Metrics/CyclomaticComplexity
96
+ page = 0
97
+ per = reaper_count * 2
98
+ orphans = []
99
+ results = conn.zrange(digests.key, page * per, (page + 1) * per)
100
+
101
+ while results.size.positive?
102
+ results.each do |digest|
103
+ break if timeout?
104
+ next if belongs_to_job?(digest)
105
+
106
+ orphans << digest
107
+ break if orphans.size >= reaper_count
108
+ end
60
109
 
61
- memo << digest
62
- break if memo.size >= reaper_count
110
+ break if timeout?
111
+ break if orphans.size >= reaper_count
112
+
113
+ page += 1
114
+ results = conn.zrange(digests.key, page * per, (page + 1) * per)
63
115
  end
116
+
117
+ orphans
118
+ end
119
+
120
+ def timeout?
121
+ elapsed_ms >= timeout_ms
122
+ end
123
+
124
+ def elapsed_ms
125
+ time_source.call - start_source
64
126
  end
65
127
 
66
128
  #
@@ -126,14 +188,14 @@ module SidekiqUniqueJobs
126
188
  return false if procs.empty?
127
189
 
128
190
  procs.sort.each do |key|
129
- valid, workers = conn.pipelined do
191
+ valid, workers = conn.pipelined do |pipeline|
130
192
  # TODO: Remove the if statement in the future
131
- if conn.respond_to?(:exists?)
132
- conn.exists?(key)
193
+ if pipeline.respond_to?(:exists?)
194
+ pipeline.exists?(key)
133
195
  else
134
- conn.exists(key)
196
+ pipeline.exists(key)
135
197
  end
136
- conn.hgetall("#{key}:workers")
198
+ pipeline.hgetall("#{key}:workers")
137
199
  end
138
200
 
139
201
  next unless valid
@@ -185,6 +247,7 @@ module SidekiqUniqueJobs
185
247
 
186
248
  loop do
187
249
  range_start = (page * page_size) - deleted_size
250
+
188
251
  range_end = range_start + page_size - 1
189
252
  entries = conn.lrange(queue_key, range_start, range_end)
190
253
  page += 1
@@ -194,7 +257,26 @@ module SidekiqUniqueJobs
194
257
  entries.each(&block)
195
258
 
196
259
  deleted_size = initial_size - conn.llen(queue_key)
260
+
261
+ # The queue is growing, not shrinking, just keep looping
262
+ deleted_size = 0 if deleted_size.negative?
263
+ end
264
+ end
265
+
266
+ # If sidekiq queues are very full, it becomes highly inefficient for the reaper
267
+ # because it must check every queued job to verify a digest is safe to delete
268
+ # The reaper checks queued jobs in batches of 50, adding 2 reads per digest
269
+ # With a queue length of 1,000 jobs, that's over 20 extra reads per digest.
270
+ def queues_very_full?
271
+ total_queue_size = 0
272
+ Sidekiq.redis do |conn|
273
+ queues(conn) do |queue|
274
+ total_queue_size += conn.llen("queue:#{queue}")
275
+
276
+ return true if total_queue_size > MAX_QUEUE_LENGTH
277
+ end
197
278
  end
279
+ false
198
280
  end
199
281
 
200
282
  #
@@ -211,5 +293,6 @@ module SidekiqUniqueJobs
211
293
  conn.zscan_each(key, match: "*#{digest}*", count: 1).to_a.any?
212
294
  end
213
295
  end
296
+ # rubocop:enable Metrics/ClassLength
214
297
  end
215
298
  end
@@ -25,7 +25,9 @@ module SidekiqUniqueJobs
25
25
  #
26
26
  # @return [true, false]
27
27
  #
28
- def set(obj)
28
+ def set(obj, pipeline = nil)
29
+ return pipeline.set(key, obj) if pipeline
30
+
29
31
  redis { |conn| conn.set(key, obj) }
30
32
  end
31
33
 
@@ -68,7 +68,7 @@ module Sidekiq
68
68
  prepend UniqueExtension
69
69
  end
70
70
 
71
- if Sidekiq.const_defined?("JobRecord")
71
+ if Sidekiq.const_defined?(:JobRecord)
72
72
  # See Sidekiq::Api
73
73
  class JobRecord
74
74
  #
@@ -71,6 +71,16 @@ module SidekiqUniqueJobs # rubocop:disable Metrics/ModuleLength
71
71
  config.logger = other
72
72
  end
73
73
 
74
+ #
75
+ # Check if logging is enabled
76
+ #
77
+ #
78
+ # @return [true, false]
79
+ #
80
+ def logging?
81
+ config.logger_enabled
82
+ end
83
+
74
84
  #
75
85
  # Temporarily use another configuration and reset to the old config after yielding
76
86
  #
@@ -5,41 +5,56 @@ module SidekiqUniqueJobs
5
5
  #
6
6
  # @author Mikael Henriksson <mikael@mhenrixon.com>
7
7
  module SidekiqWorkerMethods
8
+ #
9
+ # @!attribute [r] job_class
10
+ # @return [Sidekiq::Worker] The Sidekiq::Worker implementation
11
+ attr_reader :job_class
12
+
8
13
  # Avoids duplicating worker_class.respond_to? in multiple places
9
14
  # @return [true, false]
10
- def worker_method_defined?(method_sym)
11
- worker_class.respond_to?(method_sym)
15
+ def job_method_defined?(method_sym)
16
+ job_class.respond_to?(method_sym)
12
17
  end
13
18
 
14
19
  # Wraps #get_sidekiq_options to always work with a hash
15
20
  # @return [Hash] of the worker class sidekiq options
16
- def worker_options
17
- return {} unless sidekiq_worker_class?
21
+ def job_options
22
+ return {} unless sidekiq_job_class?
18
23
 
19
- worker_class.get_sidekiq_options.deep_stringify_keys
24
+ job_class.get_sidekiq_options.deep_stringify_keys
20
25
  end
21
26
 
22
27
  # Tests that the
23
- # @return [true] if worker_class responds to get_sidekiq_options
24
- # @return [false] if worker_class does not respond to get_sidekiq_options
25
- def sidekiq_worker_class?
26
- worker_method_defined?(:get_sidekiq_options)
28
+ # @return [true] if job_class responds to get_sidekiq_options
29
+ # @return [false] if job_class does not respond to get_sidekiq_options
30
+ def sidekiq_job_class?
31
+ job_method_defined?(:get_sidekiq_options)
27
32
  end
28
33
 
29
- # The Sidekiq::Worker implementation
30
- # @return [Sidekiq::Worker]
31
- def worker_class
32
- @_worker_class ||= worker_class_constantize # rubocop:disable Naming/MemoizedInstanceVariableName
34
+ def job_class=(obj)
35
+ # this is what was originally passed in, it can be an instance or a class depending on sidekiq version
36
+ @original_job_class = obj
37
+ @job_class = job_class_constantize(obj)
33
38
  end
34
39
 
35
40
  # The hook to call after a successful unlock
36
41
  # @return [Proc]
37
- def after_unlock_hook
42
+ def after_unlock_hook # rubocop:disable Metrics/MethodLength
38
43
  lambda do
39
- if @worker_class.respond_to?(:after_unlock)
40
- @worker_class.after_unlock # instance method in sidekiq v6
41
- elsif worker_class.respond_to?(:after_unlock)
42
- worker_class.after_unlock # class method regardless of sidekiq version
44
+ if @original_job_class.respond_to?(:after_unlock)
45
+ # instance method in sidekiq v6
46
+ if @original_job_class.method(:after_unlock).arity.positive? # arity check to maintain backwards compatibility
47
+ @original_job_class.after_unlock(item)
48
+ else
49
+ @original_job_class.after_unlock
50
+ end
51
+ elsif job_class.respond_to?(:after_unlock)
52
+ # class method regardless of sidekiq version
53
+ if job_class.method(:after_unlock).arity.positive? # arity check to maintain backwards compatibility
54
+ job_class.after_unlock(item)
55
+ else
56
+ job_class.after_unlock
57
+ end
43
58
  end
44
59
  end
45
60
  end
@@ -48,7 +63,7 @@ module SidekiqUniqueJobs
48
63
  # failing back to the original argument when the constant can't be found
49
64
  #
50
65
  # @return [Sidekiq::Worker]
51
- def worker_class_constantize(klazz = @worker_class)
66
+ def job_class_constantize(klazz = @job_class)
52
67
  SidekiqUniqueJobs.safe_constantize(klazz)
53
68
  end
54
69
 
@@ -58,8 +73,12 @@ module SidekiqUniqueJobs
58
73
  #
59
74
  # @return [Hash<Symbol, Object>]
60
75
  #
61
- def default_worker_options
62
- Sidekiq.default_worker_options
76
+ def default_job_options
77
+ if Sidekiq.respond_to?(:default_job_options)
78
+ Sidekiq.default_job_options
79
+ else
80
+ Sidekiq.default_worker_options
81
+ end
63
82
  end
64
83
  end
65
84
  end
@@ -21,16 +21,24 @@ module Sidekiq
21
21
  #
22
22
  # @param [Hash<Symbol, Object>] tmp_config the temporary config to use
23
23
  #
24
- def self.use_options(tmp_config = {})
25
- old_options = default_worker_options.dup
24
+ def self.use_options(tmp_config = {}) # rubocop:disable Metrics/MethodLength
25
+ if respond_to?(:default_job_options)
26
+ default_job_options.clear
27
+ self.default_job_options = tmp_config
28
+ else
29
+ default_worker_options.clear
30
+ self.default_worker_options = tmp_config
31
+ end
26
32
 
27
- default_worker_options.clear
28
- self.default_worker_options = tmp_config
29
33
  yield
30
34
  ensure
31
- default_worker_options.clear
32
- self.default_worker_options = DEFAULT_WORKER_OPTIONS
33
- self.default_worker_options = old_options
35
+ if respond_to?(:default_job_options)
36
+ default_job_options.clear
37
+ self.default_job_options = default_job_options
38
+ else
39
+ default_worker_options.clear
40
+ self.default_worker_options = DEFAULT_WORKER_OPTIONS
41
+ end
34
42
  end
35
43
 
36
44
  #
@@ -54,20 +62,14 @@ module Sidekiq
54
62
 
55
63
  yield
56
64
  ensure
57
- self.sidekiq_options_hash = Sidekiq::DEFAULT_WORKER_OPTIONS
58
- sidekiq_options(old_options)
59
- end
65
+ self.sidekiq_options_hash =
66
+ if Sidekiq.respond_to?(:default_job_options)
67
+ Sidekiq.default_job_options
68
+ else
69
+ DEFAULT_WORKER_OPTIONS
70
+ end
60
71
 
61
- #
62
- # Clears the jobs for this worker and removes all locks
63
- #
64
- def clear
65
- jobs.each do |job|
66
- SidekiqUniqueJobs::Unlockable.unlock(job)
67
- end
68
-
69
- Sidekiq::Queues[queue].clear
70
- jobs.clear
72
+ sidekiq_options(old_options)
71
73
  end
72
74
  end
73
75
 
@@ -88,6 +90,36 @@ module Sidekiq
88
90
  super(options)
89
91
  end
90
92
 
93
+ #
94
+ # Prepends deletion of locks to clear
95
+ #
96
+ module ClassMethods
97
+ #
98
+ # Clears the jobs for this worker and removes all locks
99
+ #
100
+ def clear
101
+ jobs.each do |job|
102
+ SidekiqUniqueJobs::Unlockable.unlock(job)
103
+ end
104
+
105
+ super
106
+ end
107
+ end
108
+ end
109
+
110
+ prepend Overrides
111
+
112
+ #
113
+ # Prepends methods to Sidekiq::Worker
114
+ #
115
+ module ClassMethods
116
+ prepend Overrides::ClassMethods
117
+ end
118
+
119
+ #
120
+ # Prepends singleton methods to Sidekiq::Worker
121
+ #
122
+ module SignletonOverrides
91
123
  #
92
124
  # Clears all jobs for this worker and removes all locks
93
125
  #
@@ -98,6 +130,6 @@ module Sidekiq
98
130
  end
99
131
  end
100
132
 
101
- prepend Overrides
133
+ singleton_class.prepend SignletonOverrides
102
134
  end
103
135
  end