sidekiq-priority_queue 1.0.3 → 1.0.4
- checksums.yaml +4 -4
- data/Gemfile.lock +3 -3
- data/lib/sidekiq/priority_queue/reliable_fetch.rb +101 -31
- data/sidekiq-priority_queue.gemspec +1 -1
- metadata +5 -5
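
The substantive change is the ReliableFetch rewrite below: the fetcher now registers every worker process in Redis, keeps a per-process work-in-progress (WIP) set for each queue, and recovers jobs left behind by crashed processes. A rough sketch of how a Sidekiq 6 server might wire it up; the initializer itself is not part of this diff, and the only grounding is that the removed reliable_fetch_active? helper reads the strategy from config.options[:fetch], so treat the wiring below as an assumption rather than documented API:

    # Sketch only; the configure_server wiring is assumed, not shipped in this release.
    require 'sidekiq'
    require 'sidekiq/priority_queue/reliable_fetch'

    Sidekiq.configure_server do |config|
      fetch = Sidekiq::PriorityQueue::ReliableFetch.new(config.options)
      config.options[:fetch] = fetch
      fetch.setup # new in 1.0.4: registers the :startup, :heartbeat and :shutdown hooks
    end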
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 041d042d2b08144431bc0e2ea76043e0f5f7e5cec5c28fc1c5a76b17c5390069
+  data.tar.gz: e2e2dc2e38375942d02a93740191a4aca744caa8d3462667dbd63780e0585ff6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: b02c99baec57233cc4d315662faeb078d75d4bf887d43ec22cbec313bb4e1b7f2716236c1cbae27284871bd335d9701d26bb7ab9737438212e5448aa0ef2858a
+  data.tar.gz: 48e42a6e9eb50b28ae3446a4aacbdb3294fc734b48421fb4cbe86fc3add06b0feaf95c1eed3f60f1ae2a0e0e1b372c979f05097a8dbfbefd3c416143a0716c0c
data/Gemfile.lock
CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    sidekiq-priority_queue (1.0.3)
+    sidekiq-priority_queue (1.0.4)
       sidekiq (>= 6.2.2)
 
 GEM
@@ -23,7 +23,7 @@ GEM
     rack-test (1.1.0)
       rack (>= 1.0, < 3)
     rake (13.0.3)
-    redis (4.
+    redis (4.5.1)
     sidekiq (6.2.2)
       connection_pool (>= 2.2.2)
       rack (~> 2.0)
@@ -47,4 +47,4 @@ DEPENDENCIES
   simplecov
 
 BUNDLED WITH
-   2.
+   2.2.3
data/lib/sidekiq/priority_queue/reliable_fetch.rb
CHANGED
@@ -1,9 +1,11 @@
 # frozen_string_literal: true
 require 'sidekiq'
+require 'sidekiq/util'
 
 module Sidekiq
   module PriorityQueue
     class ReliableFetch
+      include Sidekiq::Util
 
       UnitOfWork = Struct.new(:queue, :job, :wip_queue) do
         def acknowledge
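
Sidekiq::Util, required and included above, is what supplies the identity helper used throughout the new code: a per-process string that Sidekiq itself uses for its heartbeat key. A stand-alone illustration, assuming standard Sidekiq 6 behaviour; the demo class and printed format are illustrative, not part of this gem:

    require 'sidekiq'
    require 'sidekiq/util'

    class IdentityDemo
      include Sidekiq::Util
    end

    # Roughly "hostname:pid:nonce"; stable for the life of the process, so it
    # can key the per-process WIP sets and the super_processes_priority registry.
    puts IdentityDemo.new.identity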
@@ -38,10 +40,26 @@ module Sidekiq
         @strictly_ordered_queues = !!options[:strict]
         @queues = options[:queues].map { |q| "priority-queue:#{q}" }
         @queues = @queues.uniq if @strictly_ordered_queues
+        @done = false
         @process_index = options[:index] || ENV['PROCESS_INDEX']
       end
 
+      def setup
+        Sidekiq.on(:startup) do
+          cleanup_the_dead
+          register_myself
+        end
+        Sidekiq.on(:shutdown) do
+          @done = true
+        end
+        Sidekiq.on(:heartbeat) do
+          register_myself
+        end
+      end
+
       def retrieve_work
+        return nil if @done
+
         work = @queues.detect do |q|
           job = zpopmin_sadd(q, wip_queue(q));
           break [q,job] if job
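
This hunk is the lifecycle half of the change: setup subscribes to Sidekiq's server events, and the @done flag turns retrieve_work into a no-op once shutdown begins so processor threads can drain. A condensed sketch of the intended sequence, inferred from the code above rather than from separate documentation:

    require 'sidekiq'
    require 'sidekiq/priority_queue/reliable_fetch'

    fetch = Sidekiq::PriorityQueue::ReliableFetch.new(queues: ['default'])
    fetch.setup
    # :startup   -> cleanup_the_dead (requeue WIP left by crashed processes),
    #               then register_myself (record this process and its WIP sets)
    # :heartbeat -> register_myself again, refreshing the registration
    # :shutdown  -> @done = true, so retrieve_work returns nil and the processors
    #               drain; bulk_requeue then handles anything still in WIP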
@@ -50,7 +68,7 @@ module Sidekiq
       end
 
       def wip_queue(q)
-        "
+        "queue:spriorityq|#{identity}|#{q}"
       end
 
       def zpopmin_sadd(queue, wip_queue)
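
The new WIP set name embeds the process identity between pipe characters, which is what later lets cleanup_the_dead recover the original queue name with split('|').last. The round trip, using a made-up identity:

    wip = "queue:spriorityq|worker-1:4021:9f3c|priority-queue:default"
    wip.split('|').last # => "priority-queue:default" (the sorted set jobs go back to)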
@@ -61,7 +79,7 @@ module Sidekiq
       end
 
       def spop(wip_queue)
-        Sidekiq.redis{ |con| con.spop(wip_queue) }
+        Sidekiq.redis { |con| con.spop(wip_queue) }
       end
 
       def queues_cmd
@@ -72,55 +90,107 @@ module Sidekiq
         end
       end
 
-
-
-
-
+      # Below method is called when we close sidekiq process gracefully
+      def bulk_requeue(_inprogress, _options)
+        Sidekiq.logger.debug { "Priority ReliableFetch: Re-queueing terminated jobs" }
+        requeue_wip_jobs
+        unregister_super_process
       end
 
-
-        Sidekiq.logger.debug { "Re-queueing WIP jobs" }
-        process_index ||= ENV['PROCESS_INDEX']
-        requeue_wip_jobs(queues, process_index)
-      end
+      private
 
-
-
-
-
+      # Below method is only to make sure we get jobs from incorrectly closed process (for example force killed using kill -9 SIDEKIQ_PID)
+      def cleanup_the_dead
+        overall_moved_count = 0
+        Sidekiq.redis do |conn|
+          conn.sscan_each("super_processes_priority") do |super_process|
+            next if conn.exists?(super_process) # Don't clean up currently running processes
+
+            Sidekiq.logger.debug { "Priority ReliableFetch: Moving job from #{super_process} back to original queues" }
+
+            # We need to pushback any leftover jobs still in WIP
+            previously_handled_queues = conn.smembers("#{super_process}:super_priority_queues")
+
+            # Below previously_handled_queues are simply WIP queues of previous, dead processes
+            previously_handled_queues.each do |previously_handled_queue|
+              queue_moved_size = 0
+              original_priority_queue_name = previously_handled_queue.split('|').last
+
+              Sidekiq.logger.debug { "Priority ReliableFetch: Moving job from #{previously_handled_queue} back to original queue: #{original_priority_queue_name}" }
+              loop do
+                break if conn.scard(previously_handled_queue) == 0
+
+                # Here we should wrap below two operations in Lua script
+                item = conn.spop(previously_handled_queue)
+                conn.zadd(original_priority_queue_name, 0, item)
+                queue_moved_size += 1
+                overall_moved_count += 1
+              end
+              # Below we simply remove old WIP queue
+              conn.srem(previously_handled_queue) if conn.scard(previously_handled_queue) == 0
+              Sidekiq.logger.debug { "Priority ReliableFetch: Moved #{queue_moved_size} jobs from ##{previously_handled_queue} back to original_queue: #{original_priority_queue_name} "}
+            end
+
+            Sidekiq.logger.debug { "Priority ReliableFetch: Unregistering super process #{super_process}" }
+            conn.del("#{super_process}:super_priority_queues")
+            conn.srem("super_processes_priority", super_process)
           end
         end
+        Sidekiq.logger.debug { "Priority ReliableFetch: Moved overall #{overall_moved_count} jobs from WIP queues" }
+      rescue => ex
+        # best effort, ignore Redis network errors
+        Sidekiq.logger.warn { "Priority ReliableFetch: Failed to requeue: #{ex.message}" }
       end
 
-
-
-      def self.reliable_fetch_active?(config)
-        return true if config.options[:fetch].is_a?(Sidekiq::PriorityQueue::ReliableFetch)
-        return config.options[:fetch].is_a?(Sidekiq::PriorityQueue::CombinedFetch) &&
-          config.options[:fetch].fetches.any? { |f| f.is_a?(Sidekiq::PriorityQueue::ReliableFetch) }
-      end
-
-      def self.requeue_wip_jobs(queues, index)
+      def requeue_wip_jobs
         jobs_to_requeue = {}
         Sidekiq.redis do |conn|
-          queues.
-
+          @queues.each do |q|
+            wip_queue_name = wip_queue(q)
             jobs_to_requeue[q] = []
-
+
+            while job = conn.spop(wip_queue_name) do
               jobs_to_requeue[q] << job
             end
           end
 
           conn.pipelined do
            jobs_to_requeue.each do |queue, jobs|
-
-
+              next if jobs.size == 0 # ZADD doesn't work with empty arrays
+
+              conn.zadd(queue, jobs.map {|j| [0, j] })
            end
          end
        end
-        Sidekiq.logger.info("Pushed #{ jobs_to_requeue.
+        Sidekiq.logger.info("Priority ReliableFetch: Pushed #{ jobs_to_requeue.values.flatten.size } jobs back to Redis")
      rescue => ex
-        Sidekiq.logger.warn("Failed to requeue #{ jobs_to_requeue.
+        Sidekiq.logger.warn("Priority ReliableFetch: Failed to requeue #{ jobs_to_requeue.values.flatten.size } jobs: #{ex.message}")
+      end
+
+      def register_myself
+        super_process_wip_queues = @queues.map { |q| wip_queue(q) }
+        id = identity # This is from standard sidekiq, updated with every heartbeat
+
+        # This method will run multiple times so seeing this message twice is no problem.
+        Sidekiq.logger.debug { "Priority ReliableFetch: Registering super process #{id} with #{super_process_wip_queues}" }
+
+        Sidekiq.redis do |conn|
+          conn.multi do
+            conn.sadd("super_processes_priority", id)
+            conn.sadd("#{id}:super_priority_queues", super_process_wip_queues)
+          end
+        end
+      end
+
+      def unregister_super_process
+        id = identity
+        Sidekiq.logger.debug { "Priority ReliableFetch: Unregistering super process #{id}" }
+        Sidekiq.redis do |conn|
+          conn.multi do
+            conn.srem("super_processes_priority", id)
+            conn.del("#{id}:super_priority_queues")
+          end
+        end
       end
     end
   end
 end
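
Taken together, the private methods define a small recovery protocol in Redis: register_myself records each live process in the super_processes_priority set and lists its WIP sets under "#{identity}:super_priority_queues"; cleanup_the_dead walks that registry on startup, skips processes whose heartbeat key still exists, and moves every member of a dead process's WIP sets back onto the original sorted set with score 0 (ZPOPMIN serves the lowest score first, so recovered jobs run ahead of normally scored ones). A condensed redis-rb sketch of that move; the dead WIP set name is hypothetical, and as the inline comment in the diff notes, SPOP plus ZADD is not atomic until wrapped in a Lua script:

    require 'redis'

    redis = Redis.new
    wip   = "queue:spriorityq|dead-host:999:abcd|priority-queue:default"
    queue = wip.split('|').last

    while (job = redis.spop(wip))
      redis.zadd(queue, 0, job) # score 0 re-enqueues the job with top priority
    end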
data/sidekiq-priority_queue.gemspec
CHANGED
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name = 'sidekiq-priority_queue'
-  s.version = '1.0.3'
+  s.version = '1.0.4'
   s.date = '2018-07-31'
   s.summary = "Priority Queuing for Sidekiq"
   s.description = "An extension for Sidekiq allowing jobs in a single queue to be executed by a priority score rather than FIFO"
metadata
CHANGED
@@ -1,12 +1,12 @@
 --- !ruby/object:Gem::Specification
 name: sidekiq-priority_queue
 version: !ruby/object:Gem::Version
-  version: 1.0.3
+  version: 1.0.4
 platform: ruby
 authors:
 - Jacob Matthews
 - Petr Kopac
-autorequire:
+autorequire:
 bindir: bin
 cert_chain: []
 date: 2018-07-31 00:00:00.000000000 Z
@@ -78,7 +78,7 @@ homepage: https://github.com/chartmogul/sidekiq-priority_queue
 licenses:
 - MIT
 metadata: {}
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -93,8 +93,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.
-signing_key:
+rubygems_version: 3.2.3
+signing_key:
 specification_version: 4
 summary: Priority Queuing for Sidekiq
 test_files: []