gitlab-sidekiq-fetcher 0.5.5 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: eb384451139d1638854cd94a673b12e0f7fd5a42c663535da0145fd153ba4c51
- data.tar.gz: 43851081b6406fe8a876824ef034c548665382707db0aaf41c84c186a98419b0
+ metadata.gz: c7be23d59956ffa44288a1c870bcca66fd0119682f810325d71a3ebaa8b76e80
+ data.tar.gz: 013a7124f61044572ad93335e95c18357c60804dd89024d987485b2d87775787
  SHA512:
- metadata.gz: 617032e226ffc898b1c411b0f7598169272bc8521156b9eadcb35b48a139cd854f1b12a2fd98e7fd174128727f0b8c60917780cd2b6dec82cb1370a6260b20ee
- data.tar.gz: 5e56e3840a5f0a4f437ec298de7b68ef796dcfa7d31e69117398ceb22b4ed07fbb4eac4e711b344f5134fd748f9afa476d0d4ffc216cc02cd4eec5b9db90a9c2
+ metadata.gz: 92653bc5f9b5729f4dd50a8243a869c20d9621a1a9d25c46d729e735895e0f2d4d940c5a766803f1a5fd908ab0d9340f27d255dbd99f31bab4923e2f539c1882
+ data.tar.gz: d763b8b0ee3c2522752130fac86b83e67e8513faf919dd361aad9896aac684809650959b1627dd38e0440343c063f95b807b67fc31a10217ce1f15c428759803
data/.gitlab-ci.yml CHANGED
@@ -40,7 +40,6 @@ integration_reliable:
  variables:
  JOB_FETCHER: reliable

-
  integration_basic:
  extends: .integration
  allow_failure: yes
@@ -63,7 +62,6 @@ term_interruption:
  services:
  - redis:alpine

-
  # rubocop:
  # script:
  # - bundle exec rubocop
data/Gemfile CHANGED
@@ -7,6 +7,6 @@ git_source(:github) { |repo_name| "https://github.com/#{repo_name}" }
  group :test do
  gem "rspec", '~> 3'
  gem "pry"
- gem "sidekiq", '~> 5.0'
+ gem "sidekiq", '~> 6.1'
  gem 'simplecov', require: false
  end
data/Gemfile.lock CHANGED
@@ -2,7 +2,7 @@ GEM
  remote: https://rubygems.org/
  specs:
  coderay (1.1.2)
- connection_pool (2.2.2)
+ connection_pool (2.2.3)
  diff-lcs (1.3)
  docile (1.3.1)
  json (2.1.0)
@@ -10,10 +10,8 @@ GEM
  pry (0.11.3)
  coderay (~> 1.1.0)
  method_source (~> 0.9.0)
- rack (2.0.5)
- rack-protection (2.0.4)
- rack
- redis (4.0.2)
+ rack (2.2.3)
+ redis (4.2.1)
  rspec (3.8.0)
  rspec-core (~> 3.8.0)
  rspec-expectations (~> 3.8.0)
@@ -27,10 +25,10 @@ GEM
  diff-lcs (>= 1.2.0, < 2.0)
  rspec-support (~> 3.8.0)
  rspec-support (3.8.0)
- sidekiq (5.2.2)
- connection_pool (~> 2.2, >= 2.2.2)
- rack-protection (>= 1.5.0)
- redis (>= 3.3.5, < 5)
+ sidekiq (6.1.0)
+ connection_pool (>= 2.2.2)
+ rack (~> 2.0)
+ redis (>= 4.2.0)
  simplecov (0.16.1)
  docile (~> 1.1)
  json (>= 1.8, < 3)
@@ -43,8 +41,8 @@ PLATFORMS
  DEPENDENCIES
  pry
  rspec (~> 3)
- sidekiq (~> 5.0)
+ sidekiq (~> 6.1)
  simplecov

  BUNDLED WITH
- 1.17.1
+ 1.17.2
data/README.md CHANGED
@@ -6,6 +6,10 @@ fetches from Redis.

  It's based on https://github.com/TEA-ebook/sidekiq-reliable-fetch.

+ **IMPORTANT NOTE:** Since version `0.7.0` this gem works only with `sidekiq >= 6.1` (which introduced breaking changes to the Fetch API). Please use version `~> 0.5` if you use an older version of `sidekiq`.
+
+ **UPGRADE NOTE:** If upgrading from 0.7.0, strongly consider a full deploy of 0.7.1 before moving to 0.8.0; that release fixes a bug in the queue name validation that will be hit if sidekiq nodes running 0.7.0 see working queues named by 0.8.0. See https://gitlab.com/gitlab-org/sidekiq-reliable-fetch/-/merge_requests/22
+
  There are two strategies implemented: [Reliable fetch](http://redis.io/commands/rpoplpush#pattern-reliable-queue) using `rpoplpush` command and
  semi-reliable fetch that uses regular `brpop` and `lpush` to pick the job and put it to working queue. The main benefit of "Reliable" strategy is that `rpoplpush` is atomic, eliminating a race condition in which jobs can be lost.
  However, it comes at a cost because `rpoplpush` can't watch multiple lists at the same time so we need to iterate over the entire queue list which significantly increases pressure on Redis when there are more than a few queues. The "semi-reliable" strategy is much more reliable than the default Sidekiq fetcher, though. Compared to the reliable fetch strategy, it does not increase pressure on Redis significantly.
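To make the trade-off above concrete, the Redis-level difference between the two strategies looks roughly like this. This is a minimal sketch using only the `redis` gem; the queue and working-queue key names are illustrative and are not the keys the gem actually uses:

```ruby
require 'redis'

redis = Redis.new

queue   = 'queue:default'          # illustrative source queue
working = 'working:queue:default'  # illustrative per-process working queue

# Reliable strategy: RPOPLPUSH atomically moves the job into the working
# queue, so it never exists only in process memory -- but each call can poll
# just one queue, so every queue has to be polled in turn.
job = redis.rpoplpush(queue, working)

# Semi-reliable strategy: BRPOP can block on many queues at once (far cheaper
# for Redis), but the job is briefly held only in this process before the
# LPUSH lands it in the working queue.
picked_queue, job = redis.brpop('queue:default', 'queue:low', timeout: 2)
redis.lpush(working, job) if job
```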
@@ -1,6 +1,6 @@
  Gem::Specification.new do |s|
  s.name = 'gitlab-sidekiq-fetcher'
- s.version = '0.5.5'
+ s.version = '0.8.0'
  s.authors = ['TEA', 'GitLab']
  s.email = 'valery@gitlab.com'
  s.license = 'LGPL-3.0'
@@ -10,5 +10,5 @@ Gem::Specification.new do |s|
  s.require_paths = ['lib']
  s.files = `git ls-files`.split($\)
  s.test_files = []
- s.add_dependency 'sidekiq', '~> 5'
+ s.add_dependency 'sidekiq', '~> 6.1'
  end
@@ -45,11 +45,13 @@ module Sidekiq
  end

  def self.setup_reliable_fetch!(config)
- config.options[:fetch] = if config.options[:semi_reliable_fetch]
- Sidekiq::SemiReliableFetch
- else
- Sidekiq::ReliableFetch
- end
+ fetch_strategy = if config.options[:semi_reliable_fetch]
+ Sidekiq::SemiReliableFetch
+ else
+ Sidekiq::ReliableFetch
+ end
+
+ config.options[:fetch] = fetch_strategy.new(config.options)

  Sidekiq.logger.info('GitLab reliable fetch activated!')

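The hunk above reflects the Fetch API change in `sidekiq >= 6.1`: `config.options[:fetch]` must now hold a fetcher instance rather than a strategy class, which is why `setup_reliable_fetch!` builds `fetch_strategy.new(config.options)`. A minimal usage sketch, assuming the usual server initializer; the file path and the `semi_reliable_fetch` toggle value are illustrative, and the receiver is the class exercised by the spec further down:

```ruby
# config/initializers/sidekiq.rb (illustrative location)
Sidekiq.configure_server do |config|
  # Optional: pick the brpop-based semi-reliable strategy instead of rpoplpush.
  config.options[:semi_reliable_fetch] = true

  # Chooses the strategy and assigns an *instance* to config.options[:fetch].
  Sidekiq::BaseReliableFetch.setup_reliable_fetch!(config)
end
```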
@@ -92,7 +94,44 @@ module Sidekiq
  Sidekiq.logger.debug("Heartbeat for #{identity}")
  end

- def self.bulk_requeue(inprogress, _options)
+ def self.worker_dead?(identity, conn)
+ !conn.get(heartbeat_key(identity))
+ end
+
+ def self.heartbeat_key(identity)
+ "reliable-fetcher-heartbeat-#{identity.gsub(':', '-')}"
+ end
+
+ def self.working_queue_name(queue)
+ "#{WORKING_QUEUE_PREFIX}:#{queue}:#{identity}"
+ end
+
+ attr_reader :cleanup_interval, :last_try_to_take_lease_at, :lease_interval,
+ :queues, :use_semi_reliable_fetch,
+ :strictly_ordered_queues
+
+ def initialize(options)
+ raise ArgumentError, 'missing queue list' unless options[:queues]
+
+ @cleanup_interval = options.fetch(:cleanup_interval, DEFAULT_CLEANUP_INTERVAL)
+ @lease_interval = options.fetch(:lease_interval, DEFAULT_LEASE_INTERVAL)
+ @last_try_to_take_lease_at = 0
+ @strictly_ordered_queues = !!options[:strict]
+ @queues = options[:queues].map { |q| "queue:#{q}" }
+ end
+
+ def retrieve_work
+ clean_working_queues! if take_lease
+
+ retrieve_unit_of_work
+ end
+
+ def retrieve_unit_of_work
+ raise NotImplementedError,
+ "#{self.class} does not implement #{__method__}"
+ end
+
+ def bulk_requeue(inprogress, _options)
  return if inprogress.empty?

  Sidekiq.redis do |conn|
@@ -100,7 +139,7 @@ module Sidekiq
  conn.multi do |multi|
  preprocess_interrupted_job(unit_of_work.job, unit_of_work.queue, multi)

- multi.lrem(working_queue_name(unit_of_work.queue), 1, unit_of_work.job)
+ multi.lrem(self.class.working_queue_name(unit_of_work.queue), 1, unit_of_work.job)
  end
  end
  end
@@ -108,15 +147,9 @@ module Sidekiq
  Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{e.message}")
  end

- def self.clean_working_queue!(original_queue, working_queue)
- Sidekiq.redis do |conn|
- while job = conn.rpop(working_queue)
- preprocess_interrupted_job(job, original_queue)
- end
- end
- end
+ private

- def self.preprocess_interrupted_job(job, queue, conn = nil)
+ def preprocess_interrupted_job(job, queue, conn = nil)
  msg = Sidekiq.load_json(job)
  msg['interrupted_count'] = msg['interrupted_count'].to_i + 1

@@ -127,7 +160,21 @@ module Sidekiq
  end
  end

- def self.extract_queue_and_identity(key)
+ # If you want this method to be run in a scope of multi connection
+ # you need to pass it
+ def requeue_job(queue, msg, conn)
+ with_connection(conn) do |conn|
+ conn.lpush(queue, Sidekiq.dump_json(msg))
+ end
+
+ Sidekiq.logger.info(
+ message: "Pushed job #{msg['jid']} back to queue #{queue}",
+ jid: msg['jid'],
+ queue: queue
+ )
+ end
+
+ def extract_queue_and_identity(key)
  # New identity format is "{hostname}:{pid}:{randomhex}
  # Old identity format is "{hostname}:{pid}"
  # Queue names may also have colons (namespaced).
@@ -142,7 +189,7 @@ module Sidekiq

  # Detect "old" jobs and requeue them because the worker they were assigned
  # to probably failed miserably.
- def self.clean_working_queues!
+ def clean_working_queues!
  Sidekiq.logger.info('Cleaning working queues')

  Sidekiq.redis do |conn|
@@ -151,30 +198,26 @@ module Sidekiq

  next if original_queue.nil? || identity.nil?

- clean_working_queue!(original_queue, key) if worker_dead?(identity, conn)
+ clean_working_queue!(original_queue, key) if self.class.worker_dead?(identity, conn)
  end
  end
  end

- def self.worker_dead?(identity, conn)
- !conn.get(heartbeat_key(identity))
- end
-
- def self.heartbeat_key(identity)
- "reliable-fetcher-heartbeat-#{identity.gsub(':', '-')}"
- end
-
- def self.working_queue_name(queue)
- "#{WORKING_QUEUE_PREFIX}:#{queue}:#{identity}"
+ def clean_working_queue!(original_queue, working_queue)
+ Sidekiq.redis do |conn|
+ while job = conn.rpop(working_queue)
+ preprocess_interrupted_job(job, original_queue)
+ end
+ end
  end

- def self.interruption_exhausted?(msg)
+ def interruption_exhausted?(msg)
  return false if max_retries_after_interruption(msg['class']) < 0

  msg['interrupted_count'].to_i >= max_retries_after_interruption(msg['class'])
  end

- def self.max_retries_after_interruption(worker_class)
+ def max_retries_after_interruption(worker_class)
  max_retries_after_interruption = nil

  max_retries_after_interruption ||= begin
@@ -187,7 +230,7 @@ module Sidekiq
  max_retries_after_interruption
  end

- def self.send_to_quarantine(msg, multi_connection = nil)
+ def send_to_quarantine(msg, multi_connection = nil)
  Sidekiq.logger.warn(
  class: msg['class'],
  jid: msg['jid'],
@@ -198,52 +241,13 @@ module Sidekiq
  Sidekiq::InterruptedSet.new.put(job, connection: multi_connection)
  end

- # If you want this method to be run is a scope of multi connection
- # you need to pass it
- def self.requeue_job(queue, msg, conn)
- with_connection(conn) do |conn|
- conn.lpush(queue, Sidekiq.dump_json(msg))
- end
-
- Sidekiq.logger.info(
- message: "Pushed job #{msg['jid']} back to queue #{queue}",
- jid: msg['jid'],
- queue: queue
- )
- end
-
  # Yield block with an existing connection or creates another one
- def self.with_connection(conn, &block)
+ def with_connection(conn)
  return yield(conn) if conn

- Sidekiq.redis { |conn| yield(conn) }
- end
-
- attr_reader :cleanup_interval, :last_try_to_take_lease_at, :lease_interval,
- :queues, :use_semi_reliable_fetch,
- :strictly_ordered_queues
-
- def initialize(options)
- @cleanup_interval = options.fetch(:cleanup_interval, DEFAULT_CLEANUP_INTERVAL)
- @lease_interval = options.fetch(:lease_interval, DEFAULT_LEASE_INTERVAL)
- @last_try_to_take_lease_at = 0
- @strictly_ordered_queues = !!options[:strict]
- @queues = options[:queues].map { |q| "queue:#{q}" }
- end
-
- def retrieve_work
- self.class.clean_working_queues! if take_lease
-
- retrieve_unit_of_work
+ Sidekiq.redis { |redis_conn| yield(redis_conn) }
  end

- def retrieve_unit_of_work
- raise NotImplementedError,
- "#{self.class} does not implement #{__method__}"
- end
-
- private
-
  def take_lease
  return unless allowed_to_take_a_lease?

@@ -6,23 +6,21 @@ module Sidekiq
  # we inject a regular sleep into the loop.
  RELIABLE_FETCH_IDLE_TIMEOUT = 5 # seconds

- attr_reader :queues_iterator, :queues_size
+ attr_reader :queues_size

  def initialize(options)
  super

+ @queues = queues.uniq if strictly_ordered_queues
  @queues_size = queues.size
- @queues_iterator = queues.cycle
  end

  private

  def retrieve_unit_of_work
- @queues_iterator.rewind if strictly_ordered_queues
-
- queues_size.times do
- queue = queues_iterator.next
+ queues_list = strictly_ordered_queues ? queues : queues.shuffle

+ queues_list.each do |queue|
  work = Sidekiq.redis do |conn|
  conn.rpoplpush(queue, self.class.working_queue_name(queue))
  end
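The change above drops the shared `queues.cycle` enumerator: each `retrieve_unit_of_work` call now works from its own shuffled copy of the queue list (or the declared, de-duplicated order when `strict` is set), so fetcher threads no longer share iterator state and no single queue monopolizes the `rpoplpush` polling. A toy illustration of the two orderings, with illustrative queue names:

```ruby
queues = %w[queue:default queue:high queue:low]

# strict: true -- always the declared order (duplicates removed), so later
# queues are only reached once the earlier ones are empty.
p queues.uniq

# strict: false -- a fresh shuffle on every fetch, spreading polling
# attention evenly across the queues.
3.times { p queues.shuffle }
```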
@@ -39,14 +39,15 @@ describe Sidekiq::BaseReliableFetch do
  end
  end

- describe '.bulk_requeue' do
+ describe '#bulk_requeue' do
+ let(:options) { { queues: %w[foo bar] } }
  let!(:queue1) { Sidekiq::Queue.new('foo') }
  let!(:queue2) { Sidekiq::Queue.new('bar') }

  it 'requeues the bulk' do
  uow = described_class::UnitOfWork
  jobs = [ uow.new('queue:foo', job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
- described_class.bulk_requeue(jobs, queues: [])
+ described_class.new(options).bulk_requeue(jobs, nil)

  expect(queue1.size).to eq 2
  expect(queue2.size).to eq 1
@@ -56,7 +57,7 @@ describe Sidekiq::BaseReliableFetch do
  uow = described_class::UnitOfWork
  interrupted_job = Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo'], interrupted_count: 3)
  jobs = [ uow.new('queue:foo', interrupted_job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
- described_class.bulk_requeue(jobs, queues: [])
+ described_class.new(options).bulk_requeue(jobs, nil)

  expect(queue1.size).to eq 1
  expect(queue2.size).to eq 1
@@ -69,7 +70,7 @@ describe Sidekiq::BaseReliableFetch do
  uow = described_class::UnitOfWork
  interrupted_job = Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo'], interrupted_count: 3)
  jobs = [ uow.new('queue:foo', interrupted_job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
- described_class.bulk_requeue(jobs, queues: [])
+ described_class.new(options).bulk_requeue(jobs, nil)

  expect(queue1.size).to eq 2
  expect(queue2.size).to eq 1
@@ -80,7 +81,7 @@ describe Sidekiq::BaseReliableFetch do
  end

  it 'sets heartbeat' do
- config = double(:sidekiq_config, options: {})
+ config = double(:sidekiq_config, options: { queues: %w[foo bar] })

  heartbeat_thread = described_class.setup_reliable_fetch!(config)

@@ -5,102 +5,16 @@ shared_examples 'a Sidekiq fetcher' do

  describe '#retrieve_work' do
  let(:job) { Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo']) }
- let(:fetcher) { described_class.new(queues: ['assigned']) }
-
- it 'retrieves the job and puts it to working queue' do
- Sidekiq.redis { |conn| conn.rpush('queue:assigned', job) }
-
- uow = fetcher.retrieve_work
-
- expect(working_queue_size('assigned')).to eq 1
- expect(uow.queue_name).to eq 'assigned'
- expect(uow.job).to eq job
- expect(Sidekiq::Queue.new('assigned').size).to eq 0
- end
-
- it 'does not retrieve a job from foreign queue' do
- Sidekiq.redis { |conn| conn.rpush('queue:not_assigned', job) }
-
- expect(fetcher.retrieve_work).to be_nil
- end
-
- it 'requeues jobs from dead working queue with incremented interrupted_count' do
- Sidekiq.redis do |conn|
- conn.rpush(other_process_working_queue_name('assigned'), job)
- end
-
- expected_job = Sidekiq.load_json(job)
- expected_job['interrupted_count'] = 1
- expected_job = Sidekiq.dump_json(expected_job)
-
- uow = fetcher.retrieve_work
-
- expect(uow).to_not be_nil
- expect(uow.job).to eq expected_job
-
- Sidekiq.redis do |conn|
- expect(conn.llen(other_process_working_queue_name('assigned'))).to eq 0
- end
- end
-
- it 'ignores working queue keys in unknown formats' do
- # Add a spurious non-numeric char segment at the end; this simulates any other
- # incorrect form in general
- malformed_key = "#{other_process_working_queue_name('assigned')}:X"
- Sidekiq.redis do |conn|
- conn.rpush(malformed_key, job)
- end
-
- uow = fetcher.retrieve_work
-
- Sidekiq.redis do |conn|
- expect(conn.llen(malformed_key)).to eq 1
- end
- end
-
- it 'requeues jobs from legacy dead working queue with incremented interrupted_count' do
- Sidekiq.redis do |conn|
- conn.rpush(legacy_other_process_working_queue_name('assigned'), job)
- end
-
- expected_job = Sidekiq.load_json(job)
- expected_job['interrupted_count'] = 1
- expected_job = Sidekiq.dump_json(expected_job)
-
- uow = fetcher.retrieve_work
-
- expect(uow).to_not be_nil
- expect(uow.job).to eq expected_job
-
- Sidekiq.redis do |conn|
- expect(conn.llen(legacy_other_process_working_queue_name('assigned'))).to eq 0
- end
- end
-
- it 'does not requeue jobs from live working queue' do
- working_queue = live_other_process_working_queue_name('assigned')
-
- Sidekiq.redis do |conn|
- conn.rpush(working_queue, job)
- end
-
- uow = fetcher.retrieve_work
-
- expect(uow).to be_nil
-
- Sidekiq.redis do |conn|
- expect(conn.llen(working_queue)).to eq 1
- end
- end
+ let(:fetcher) { described_class.new(queues: queues) }

  it 'does not clean up orphaned jobs more than once per cleanup interval' do
  Sidekiq.redis = Sidekiq::RedisConnection.create(url: REDIS_URL, size: 10)

- expect(described_class).to receive(:clean_working_queues!).once
+ expect(fetcher).to receive(:clean_working_queues!).once

  threads = 10.times.map do
  Thread.new do
- described_class.new(queues: ['assigned']).retrieve_work
+ fetcher.retrieve_work
  end
  end

@@ -133,13 +47,34 @@ shared_examples 'a Sidekiq fetcher' do
  expect(jobs).to include 'this_job_should_not_stuck'
  end

- context 'with namespaced queues' do
- let (:queue) { 'namespace:assigned' }
+ shared_examples "basic queue handling" do |queue|
  let (:fetcher) { described_class.new(queues: [queue]) }

- it 'requeues jobs from dead namespaced working queue with incremented interrupted_count' do
+ it 'retrieves the job and puts it to working queue' do
+ Sidekiq.redis { |conn| conn.rpush("queue:#{queue}", job) }
+
+ uow = fetcher.retrieve_work
+
+ expect(working_queue_size(queue)).to eq 1
+ expect(uow.queue_name).to eq queue
+ expect(uow.job).to eq job
+ expect(Sidekiq::Queue.new(queue).size).to eq 0
+ end
+
+ it 'does not retrieve a job from foreign queue' do
+ Sidekiq.redis { |conn| conn.rpush("'queue:#{queue}:not", job) }
+ expect(fetcher.retrieve_work).to be_nil
+
+ Sidekiq.redis { |conn| conn.rpush("'queue:not_#{queue}", job) }
+ expect(fetcher.retrieve_work).to be_nil
+
+ Sidekiq.redis { |conn| conn.rpush("'queue:random_name", job) }
+ expect(fetcher.retrieve_work).to be_nil
+ end
+
+ it 'requeues jobs from legacy dead working queue with incremented interrupted_count' do
  Sidekiq.redis do |conn|
- conn.rpush(other_process_working_queue_name(queue), job)
+ conn.rpush(legacy_other_process_working_queue_name(queue), job)
  end

  expected_job = Sidekiq.load_json(job)
@@ -152,32 +87,26 @@ shared_examples 'a Sidekiq fetcher' do
  expect(uow.job).to eq expected_job

  Sidekiq.redis do |conn|
- expect(conn.llen(other_process_working_queue_name(queue))).to eq 0
+ expect(conn.llen(legacy_other_process_working_queue_name(queue))).to eq 0
  end
  end

- it 'does not requeue jobs in a namespaced queue from live working queue' do
- working_queue = live_other_process_working_queue_name(queue)
-
+ it 'ignores working queue keys in unknown formats' do
+ # Add a spurious non-numeric char segment at the end; this simulates any other
+ # incorrect form in general
+ malformed_key = "#{other_process_working_queue_name(queue)}:X"
  Sidekiq.redis do |conn|
- conn.rpush(working_queue, job)
+ conn.rpush(malformed_key, job)
  end

  uow = fetcher.retrieve_work

- expect(uow).to be_nil
-
  Sidekiq.redis do |conn|
- expect(conn.llen(working_queue)).to eq 1
+ expect(conn.llen(malformed_key)).to eq 1
  end
  end
- end
-
- context 'with deeper namespaced queues' do
- let (:queue) { 'deep:namespace:assigned' }
- let (:fetcher) { described_class.new(queues: [queue]) }

- it 'requeues jobs from dead namespaced working queue with incremented interrupted_count' do
+ it 'requeues jobs from dead working queue with incremented interrupted_count' do
  Sidekiq.redis do |conn|
  conn.rpush(other_process_working_queue_name(queue), job)
  end
@@ -196,7 +125,7 @@ shared_examples 'a Sidekiq fetcher' do
  end
  end

- it 'does not requeue jobs in a deeper namespaced queue from live working queue' do
+ it 'does not requeue jobs from live working queue' do
  working_queue = live_other_process_working_queue_name(queue)

  Sidekiq.redis do |conn|
@@ -213,6 +142,12 @@ shared_examples 'a Sidekiq fetcher' do
  end
  end

+ context 'with various queues' do
+ %w[assigned namespace:assigned namespace:deeper:assigned].each do |queue|
+ it_behaves_like "basic queue handling", queue
+ end
+ end
+
  context 'with short cleanup interval' do
  let(:short_interval) { 1 }
  let(:fetcher) { described_class.new(queues: queues, lease_interval: short_interval, cleanup_interval: short_interval) }
@@ -243,7 +178,6 @@ def legacy_other_process_working_queue_name(queue)
  "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{Socket.gethostname}:#{::Process.pid + 1}"
  end

-
  def other_process_working_queue_name(queue)
  "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{Socket.gethostname}:#{::Process.pid + 1}:#{::SecureRandom.hex(6)}"
  end
@@ -57,7 +57,7 @@ end
  def spawn_workers
  pids = []
  NUMBER_OF_WORKERS.times do
- pids << spawn('sidekiq -r ./config.rb')
+ pids << spawn('sidekiq -q default -q low -q high -r ./config.rb')
  end

  pids
@@ -8,19 +8,7 @@ class ReliabilityTestWorker
  sleep 1

  Sidekiq.redis do |redis|
- redis.lpush(REDIS_FINISHED_LIST, get_sidekiq_job_id)
+ redis.lpush(REDIS_FINISHED_LIST, jid)
  end
  end
-
- def get_sidekiq_job_id
- context_data = Thread.current[:sidekiq_context]&.first
-
- return unless context_data
-
- index = context_data.index('JID-')
-
- return unless index
-
- context_data[index + 4..-1]
- end
  end
@@ -11,7 +11,7 @@ def spawn_workers(number)
  pids = []

  number.times do
- pids << spawn('sidekiq -r ./config.rb')
+ pids << spawn('sidekiq -q default -q high -q low -r ./config.rb')
  end

  pids
metadata CHANGED
@@ -1,15 +1,15 @@
  --- !ruby/object:Gem::Specification
  name: gitlab-sidekiq-fetcher
  version: !ruby/object:Gem::Version
- version: 0.5.5
+ version: 0.8.0
  platform: ruby
  authors:
  - TEA
  - GitLab
- autorequire:
+ autorequire:
  bindir: bin
  cert_chain: []
- date: 2021-02-24 00:00:00.000000000 Z
+ date: 2021-03-02 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: sidekiq
@@ -17,14 +17,14 @@ dependencies:
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '5'
+ version: '6.1'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
  requirements:
  - - "~>"
  - !ruby/object:Gem::Version
- version: '5'
+ version: '6.1'
  description: Redis reliable queue pattern implemented in Sidekiq
  email: valery@gitlab.com
  executables: []
@@ -63,7 +63,7 @@ homepage: https://gitlab.com/gitlab-org/sidekiq-reliable-fetch/
  licenses:
  - LGPL-3.0
  metadata: {}
- post_install_message:
+ post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -79,7 +79,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  version: '0'
  requirements: []
  rubygems_version: 3.1.4
- signing_key:
+ signing_key:
  specification_version: 4
  summary: Reliable fetch extension for Sidekiq
  test_files: []