sidekiq-priority_queue 1.0.2 → 1.0.6

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 23819a77a9986da48fe0e15a590bd948abf62cafa7d48f7ad8aa2d6129dbb476
- data.tar.gz: cc8e7a8a99c095237a6b44c0a266753d98707c724e5af70b494a71b3a25476ac
+ metadata.gz: 0e649f9936ca3989853559d9254ce6072229a700e5b5d4f17efc1dbf723c0cbe
+ data.tar.gz: d53da6399ee5f1405598de900b753ebbe3cb3b7b45e7ca2991b915c76ac1a3a8
  SHA512:
- metadata.gz: '0884b842b53184df96c7e753d0d8d6b04621b2b5810a33e5798e95d76b13e09776ac4db62c520c0e822be0987b798deaa473e76898ecdd92b4095230b6b65501'
- data.tar.gz: f1bb0dfb7a3048ce7ca80924916ac6817ed16262a2d98596f1da50db35292b2a2cb3287f0fcdb61a468f21f39d0dac5de3f8989b7bdc87515f728179fbb7576a
+ metadata.gz: 9cf8bc3aca671561f792ce90592296655dba58541e0506c362c73ed3c54b42a8978235423c327f714b6f4749f9585ad29a942547ac5378621cfa1441bb215ef2
+ data.tar.gz: 8be1d8ad3d836104c19bc3e2473b6d77459ea3938557bd4672e72b92b3dc10a0e1cb2c2d94bc576e3c346c49a27528f94024bd9613603ef2e22de5ff1343d4ba
data/Gemfile.lock CHANGED
@@ -1,15 +1,15 @@
  PATH
  remote: .
  specs:
- sidekiq-priority_queue (1.0.2)
- sidekiq (>= 6)
+ sidekiq-priority_queue (1.0.5)
+ sidekiq (>= 6.2.2)
 
  GEM
  remote: https://rubygems.org/
  specs:
  byebug (11.1.3)
  coderay (1.1.3)
- connection_pool (2.2.3)
+ connection_pool (2.2.5)
  docile (1.3.4)
  method_source (1.0.0)
  minitest (5.14.3)
@@ -23,8 +23,8 @@ GEM
  rack-test (1.1.0)
  rack (>= 1.0, < 3)
  rake (13.0.3)
- redis (4.2.5)
- sidekiq (6.1.3)
+ redis (4.5.1)
+ sidekiq (6.2.2)
  connection_pool (>= 2.2.2)
  rack (~> 2.0)
  redis (>= 4.2.0)
@@ -47,4 +47,4 @@ DEPENDENCIES
  simplecov
 
  BUNDLED WITH
- 2.1.4
+ 2.2.3
@@ -50,7 +50,7 @@ module Sidekiq
 
  SubqueueCount = Struct.new(:name, :size)
 
- class Job < Sidekiq::Job
+ class Job < Sidekiq::JobRecord
 
  attr_reader :priority
  attr_reader :subqueue
@@ -1,9 +1,14 @@
  # frozen_string_literal: true
+
  require 'sidekiq'
+ require 'sidekiq/util'
 
  module Sidekiq
  module PriorityQueue
  class ReliableFetch
+ include Sidekiq::Util
+
+ SUPER_PROCESSES_REGISTRY_KEY = 'super_processes_priority'
 
  UnitOfWork = Struct.new(:queue, :job, :wip_queue) do
  def acknowledge
@@ -35,33 +40,51 @@ module Sidekiq
  end
 
  def initialize(options)
+ @options = options
  @strictly_ordered_queues = !!options[:strict]
  @queues = options[:queues].map { |q| "priority-queue:#{q}" }
  @queues = @queues.uniq if @strictly_ordered_queues
+ @done = false
  @process_index = options[:index] || ENV['PROCESS_INDEX']
  end
 
+ def setup
+ Sidekiq.on(:startup) do
+ cleanup_the_dead
+ register_myself
+ check
+ end
+ Sidekiq.on(:shutdown) do
+ @done = true
+ end
+ Sidekiq.on(:heartbeat) do
+ register_myself
+ end
+ end
+
  def retrieve_work
+ return nil if @done
+
  work = @queues.detect do |q|
- job = zpopmin_sadd(q, wip_queue(q));
- break [q,job] if job
+ job = zpopmin_sadd(q, wip_queue(q))
+ break [q, job] if job
  end
  UnitOfWork.new(*work, wip_queue(work.first)) if work
  end
 
  def wip_queue(q)
- "#{q}_#{Socket.gethostname}_#{@process_index}"
+ "queue:spriorityq|#{identity}|#{q}"
  end
 
  def zpopmin_sadd(queue, wip_queue)
- Sidekiq.redis do |con|
- @script_sha ||= con.script(:load, Sidekiq::PriorityQueue::Scripts::ZPOPMIN_SADD)
- con.evalsha(@script_sha, [queue, wip_queue])
+ Sidekiq.redis do |conn|
+ @script_sha ||= conn.script(:load, Sidekiq::PriorityQueue::Scripts::ZPOPMIN_SADD)
+ conn.evalsha(@script_sha, [queue, wip_queue])
  end
  end
 
  def spop(wip_queue)
- Sidekiq.redis{ |con| con.spop(wip_queue) }
+ Sidekiq.redis { |con| con.spop(wip_queue) }
  end
 
  def queues_cmd
@@ -72,55 +95,169 @@ module Sidekiq
  end
  end
 
- def bulk_requeue(_inprogress, options)
- Sidekiq.logger.debug { "Re-queueing terminated jobs" }
- process_index = options[:index] || ENV['PROCESS_INDEX']
- self.class.requeue_wip_jobs(options[:queues], process_index)
+ # Below method is called when we close sidekiq process gracefully
+ def bulk_requeue(_inprogress, _options)
+ Sidekiq.logger.debug { 'Priority ReliableFetch: Re-queueing terminated jobs' }
+ requeue_wip_jobs
+ unregister_super_process
+ end
+
+ private
+
+ def check
+ check_for_orphans if orphan_check?
+ rescue StandardError => e
+ # orphan check is best effort, we don't want Redis downtime to
+ # break Sidekiq
+ Sidekiq.logger.warn { "Priority ReliableFetch: Failed to do orphan check: #{e.message}" }
  end
 
- def self.resume_wip_jobs(queues, process_index)
- Sidekiq.logger.debug { "Re-queueing WIP jobs" }
- process_index ||= ENV['PROCESS_INDEX']
- requeue_wip_jobs(queues, process_index)
+ def orphan_check?
+ delay = @options.fetch(:reliable_fetch_orphan_check, 3600).to_i
+ return false if delay.zero?
+
+ Sidekiq.redis do |conn|
+ conn.set('priority_reliable_fetch_orphan_check', Time.now.to_f, ex: delay, nx: true)
+ end
  end
 
- Sidekiq.configure_server do |config|
- config.on(:startup) do
- if reliable_fetch_active?(config)
- Sidekiq::PriorityQueue::ReliableFetch.resume_wip_jobs(config.options[:queues], config.options[:index])
+ # This method is extra paranoid verification to check Redis for any possible
+ # orphaned queues with jobs. If we change queue names and lose jobs in the meantime,
+ # this will find old queues with jobs and rescue them.
+ def check_for_orphans
+ orphans_count = 0
+ queues_count = 0
+ orphan_queues = Set.new
+ Sidekiq.redis do |conn|
+ ids = conn.smembers(SUPER_PROCESSES_REGISTRY_KEY)
+ Sidekiq.logger.debug("Priority ReliableFetch found #{ids.size} super processes")
+
+ conn.scan_each(match: 'queue:spriorityq|*', count: 100) do |wip_queue|
+ queues_count += 1
+ _, id, original_priority_queue_name = wip_queue.split('|')
+ next if ids.include?(id)
+
+ # Race condition in pulling super_processes and checking queue liveness.
+ # Need to verify in Redis.
+ unless conn.sismember(SUPER_PROCESSES_REGISTRY_KEY, id)
+ orphan_queues << original_priority_queue_name
+ queue_jobs_count = 0
+ loop do
+ break if conn.scard(wip_queue).zero?
+
+ # Here we should wrap below two operations in Lua script
+ item = conn.spop(wip_queue)
+ conn.zadd(original_priority_queue_name, 0, item)
+ orphans_count += 1
+ queue_jobs_count += 1
+ end
+ if queue_jobs_count.positive?
+ Sidekiq::Pro.metrics.increment('jobs.recovered.fetch', by: queue_jobs_count, tags: ["queue:#{original_priority_queue_name}"])
+ end
+ end
  end
  end
+
+ if orphans_count.positive?
+ Sidekiq.logger.warn { "Priority ReliableFetch recovered #{orphans_count} orphaned jobs in queues: #{orphan_queues.to_a.inspect}" }
+ elsif queues_count.positive?
+ Sidekiq.logger.info { "Priority ReliableFetch found #{queues_count} working queues with no orphaned jobs" }
+ end
+ orphans_count
  end
 
- private
+ # Below method is only to make sure we get jobs from incorrectly closed process (for example force killed using kill -9 SIDEKIQ_PID)
+ def cleanup_the_dead
+ overall_moved_count = 0
+ Sidekiq.redis do |conn|
+ conn.sscan_each(SUPER_PROCESSES_REGISTRY_KEY) do |super_process|
+ next if conn.exists?(super_process) # Don't clean up currently running processes
+
+ Sidekiq.logger.debug { "Priority ReliableFetch: Moving job from #{super_process} back to original queues" }
+
+ # We need to pushback any leftover jobs still in WIP
+ previously_handled_queues = conn.smembers("#{super_process}:super_priority_queues")
+
+ # Below previously_handled_queues are simply WIP queues of previous, dead processes
+ previously_handled_queues.each do |previously_handled_queue|
+ queue_moved_size = 0
+ original_priority_queue_name = previously_handled_queue.split('|').last
+
+ Sidekiq.logger.debug { "Priority ReliableFetch: Moving job from #{previously_handled_queue} back to original queue: #{original_priority_queue_name}" }
+ loop do
+ break if conn.scard(previously_handled_queue).zero?
+
+ # Here we should wrap below two operations in Lua script
+ item = conn.spop(previously_handled_queue)
+ conn.zadd(original_priority_queue_name, 0, item)
+ queue_moved_size += 1
+ overall_moved_count += 1
+ end
+ # Below we simply remove old WIP queue
+ conn.del(previously_handled_queue) if conn.scard(previously_handled_queue).zero?
+ Sidekiq.logger.debug { "Priority ReliableFetch: Moved #{queue_moved_size} jobs from ##{previously_handled_queue} back to original_queue: #{original_priority_queue_name} " }
+ end
 
- def self.reliable_fetch_active?(config)
- return true if config.options[:fetch].is_a?(Sidekiq::PriorityQueue::ReliableFetch)
- return config.options[:fetch].is_a?(Sidekiq::PriorityQueue::CombinedFetch) &&
- config.options[:fetch].fetches.any? { |f| f.is_a?(Sidekiq::PriorityQueue::ReliableFetch) }
+ Sidekiq.logger.debug { "Priority ReliableFetch: Unregistering super process #{super_process}" }
+ conn.del("#{super_process}:super_priority_queues")
+ conn.srem(SUPER_PROCESSES_REGISTRY_KEY, super_process)
+ end
+ end
+ Sidekiq.logger.debug { "Priority ReliableFetch: Moved overall #{overall_moved_count} jobs from WIP queues" }
+ rescue StandardError => e
+ # best effort, ignore Redis network errors
+ Sidekiq.logger.warn { "Priority ReliableFetch: Failed to requeue: #{e.message}" }
  end
 
- def self.requeue_wip_jobs(queues, index)
+ def requeue_wip_jobs
  jobs_to_requeue = {}
  Sidekiq.redis do |conn|
- queues.map { |q| "priority-queue:#{q}" }.each do |q|
- wip_queue = "#{q}_#{Socket.gethostname}_#{index}"
+ @queues.each do |q|
+ wip_queue_name = wip_queue(q)
  jobs_to_requeue[q] = []
- while job = conn.spop(wip_queue) do
+
+ while job = conn.spop(wip_queue_name)
  jobs_to_requeue[q] << job
  end
  end
 
  conn.pipelined do
  jobs_to_requeue.each do |queue, jobs|
- return unless jobs.size > 0
- conn.zadd(queue, jobs.map{|j| [0,j] })
+ next if jobs.empty? # ZADD doesn't work with empty arrays
+
+ conn.zadd(queue, jobs.map { |j| [0, j] })
  end
  end
  end
- Sidekiq.logger.info("Pushed #{ jobs_to_requeue.map{|q| q.size }.reduce(:+) } jobs back to Redis")
- rescue => ex
- Sidekiq.logger.warn("Failed to requeue #{ jobs_to_requeue.map{|q| q.size }.reduce(:+) } jobs: #{ex.message}")
+ Sidekiq.logger.info("Priority ReliableFetch: Pushed #{jobs_to_requeue.values.flatten.size} jobs back to Redis")
+ rescue StandardError => e
+ Sidekiq.logger.warn("Priority ReliableFetch: Failed to requeue #{jobs_to_requeue.values.flatten.size} jobs: #{e.message}")
+ end
+
+ def register_myself
+ super_process_wip_queues = @queues.map { |q| wip_queue(q) }
+ id = identity # This is from standard sidekiq, updated with every heartbeat
+
+ # This method will run multiple times so seeing this message twice is no problem.
+ Sidekiq.logger.debug { "Priority ReliableFetch: Registering super process #{id} with #{super_process_wip_queues}" }
+
+ Sidekiq.redis do |conn|
+ conn.multi do
+ conn.sadd(SUPER_PROCESSES_REGISTRY_KEY, id)
+ conn.sadd("#{id}:super_priority_queues", super_process_wip_queues)
+ end
+ end
+ end
+
+ def unregister_super_process
+ id = identity
+ Sidekiq.logger.debug { "Priority ReliableFetch: Unregistering super process #{id}" }
+ Sidekiq.redis do |conn|
+ conn.multi do
+ conn.srem(SUPER_PROCESSES_REGISTRY_KEY, id)
+ conn.del("#{id}:super_priority_queues")
+ end
+ end
  end
  end
  end
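For orientation, below is a minimal, hypothetical sketch (not part of this diff) of how a fetch strategy like the ReliableFetch above might be wired into a Sidekiq 6 server. It leans on the config.options[:fetch] hook visible in the removed code and assumes setup must be called explicitly to register the startup/shutdown/heartbeat handlers; the require path is guessed from the gem name, so consult the gem's README for the canonical configuration.

  # Hypothetical wiring sketch; everything beyond ReliableFetch#initialize/#setup
  # and config.options[:fetch] (both visible in the diff above) is an assumption.
  require 'sidekiq'
  require 'sidekiq/priority_queue' # require path assumed from the gem name

  Sidekiq.configure_server do |config|
    fetch = Sidekiq::PriorityQueue::ReliableFetch.new(config.options)
    fetch.setup # registers the :startup/:shutdown/:heartbeat hooks added in this release
    config.options[:fetch] = fetch
  end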
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'sidekiq-priority_queue'
- s.version = '1.0.2'
+ s.version = '1.0.6'
  s.date = '2018-07-31'
  s.summary = "Priority Queuing for Sidekiq"
  s.description = "An extension for Sidekiq allowing jobs in a single queue to be executed by a priority score rather than FIFO"
@@ -11,6 +11,6 @@ Gem::Specification.new do |s|
  s.license = 'MIT'
  s.required_ruby_version = '>= 2.5.0'
 
- s.add_dependency 'sidekiq', '>= 6'
+ s.add_dependency 'sidekiq', '>= 6.2.2'
  s.add_development_dependency 'minitest', '~> 5.10', '>= 5.10.1'
  end
metadata CHANGED
@@ -1,12 +1,12 @@
  --- !ruby/object:Gem::Specification
  name: sidekiq-priority_queue
  version: !ruby/object:Gem::Version
- version: 1.0.2
+ version: 1.0.6
  platform: ruby
  authors:
  - Jacob Matthews
  - Petr Kopac
- autorequire:
+ autorequire:
  bindir: bin
  cert_chain: []
  date: 2018-07-31 00:00:00.000000000 Z
@@ -17,14 +17,14 @@ dependencies:
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '6'
+ version: 6.2.2
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
  - !ruby/object:Gem::Version
- version: '6'
+ version: 6.2.2
  - !ruby/object:Gem::Dependency
  name: minitest
  requirement: !ruby/object:Gem::Requirement
@@ -78,7 +78,7 @@ homepage: https://github.com/chartmogul/sidekiq-priority_queue
  licenses:
  - MIT
  metadata: {}
- post_install_message:
+ post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -93,8 +93,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.1.4
- signing_key:
+ rubygems_version: 3.2.3
+ signing_key:
  specification_version: 4
  summary: Priority Queuing for Sidekiq
  test_files: []