gitlab-sidekiq-fetcher 0.5.6 → 0.6.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 789f9e7424fe05ba56d1a5dc4aae2bd1cc750cb4c6ab3a0d87055876dec4ac63
-  data.tar.gz: c17bca12b0e63b47c3725000de1430f69cfdef6c4b5b791e40deacc0da0e2b33
+  metadata.gz: 4815e3e75915230d7b2eaf2fe3fa0daa288ec4c670b2cd211cb659aff4838788
+  data.tar.gz: c8b491f2d1a2678ef40fe856a55bf89341e041070c40941ca693685fa3c048cf
 SHA512:
-  metadata.gz: 9684d40185a5cd89a8ada02ae8563a0ae8a278866cfe59d11e5e2cc8cbcb610f29998dc4f9df0fc07720a723f6823c39e41f375542c0d9621fde87bbfccfb01c
-  data.tar.gz: 146e99c8e8fd388d56dbbd099658dceb8210101df31ab3d362eeeefa3fd52e75e469df24e1b06be5adae2231c9c978a84f2e0681b3a0bc91b8b5f5798c941f96
+  metadata.gz: 1667fb3ffb47117ac3c756c07a85867d96871b5489e820823735b62912df0d82b4d6c10be478e1f291d637d2ed15aff99a958f4fd884df98f026ee8c8f9c4c83
+  data.tar.gz: 5dad56f30515be87e79c58c3b5dce301213d427fe579309bca2a525e2b8664b94f0763c7eabfb2443324c5d8ecf5403f9826a0772e3b09b74e215e72ebfc4226
data/Gemfile CHANGED
@@ -9,5 +9,4 @@ group :test do
   gem "pry"
   gem "sidekiq", '~> 5.0'
   gem 'simplecov', require: false
-  gem 'stub_env', '~> 1.0'
 end
data/Gemfile.lock CHANGED
@@ -36,8 +36,6 @@ GEM
       json (>= 1.8, < 3)
       simplecov-html (~> 0.10.0)
     simplecov-html (0.10.2)
-    stub_env (1.0.4)
-      rspec (>= 2.0, < 4.0)
 
 PLATFORMS
   ruby
@@ -47,7 +45,6 @@ DEPENDENCIES
   rspec (~> 3)
   sidekiq (~> 5.0)
   simplecov
-  stub_env (~> 1.0)
 
 BUNDLED WITH
    1.17.1
data/gitlab-sidekiq-fetcher.gemspec CHANGED
@@ -1,6 +1,6 @@
 Gem::Specification.new do |s|
   s.name = 'gitlab-sidekiq-fetcher'
-  s.version = '0.5.6'
+  s.version = '0.6.0'
   s.authors = ['TEA', 'GitLab']
   s.email = 'valery@gitlab.com'
   s.license = 'LGPL-3.0'
@@ -10,5 +10,5 @@ Gem::Specification.new do |s|
   s.require_paths = ['lib']
   s.files = `git ls-files`.split($\)
   s.test_files = []
-  s.add_dependency 'sidekiq', '~> 5'
+  s.add_dependency 'sidekiq', '>= 5', '< 7'
 end
data/lib/sidekiq/base_reliable_fetch.rb CHANGED
@@ -21,10 +21,6 @@ module Sidekiq
     # How much time a job can be interrupted
     DEFAULT_MAX_RETRIES_AFTER_INTERRUPTION = 3
 
-    # Regexes for matching working queue keys
-    WORKING_QUEUE_REGEX = /#{WORKING_QUEUE_PREFIX}:(queue:.*):([^:]*:[0-9]*:[0-9a-f]*)\z/.freeze
-    LEGACY_WORKING_QUEUE_REGEX = /#{WORKING_QUEUE_PREFIX}:(queue:.*):([^:]*:[0-9]*)\z/.freeze
-
     UnitOfWork = Struct.new(:queue, :job) do
       def acknowledge
         Sidekiq.redis { |conn| conn.lrem(Sidekiq::BaseReliableFetch.working_queue_name(queue), 1, job) }
@@ -45,12 +41,10 @@ module Sidekiq
     end
 
     def self.setup_reliable_fetch!(config)
-      config.options[:fetch] = if config.options[:semi_reliable_fetch]
-                                 Sidekiq::SemiReliableFetch
-                               else
-                                 Sidekiq::ReliableFetch
-                               end
+      fetch = config.options[:semi_reliable_fetch] ? SemiReliableFetch : ReliableFetch
+      fetch = fetch.new(config.options) if Sidekiq::VERSION >= '6'
 
+      config.options[:fetch] = fetch
       Sidekiq.logger.info('GitLab reliable fetch activated!')
 
       start_heartbeat_thread
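
For context, the rewritten setup_reliable_fetch! keeps a single entry point for applications on Sidekiq 5 and 6: it picks SemiReliableFetch or ReliableFetch from config.options[:semi_reliable_fetch] and, on Sidekiq >= 6, stores an instance rather than the class. A minimal sketch of a server initializer using it (the file path and the decision to enable the semi-reliable strategy are illustrative, not part of this diff):

    # config/initializers/sidekiq.rb (illustrative path)
    Sidekiq.configure_server do |config|
      # A truthy value selects SemiReliableFetch; when absent, ReliableFetch is used.
      config.options[:semi_reliable_fetch] = true

      # On Sidekiq >= 6 this stores a fetcher instance in config.options[:fetch];
      # on Sidekiq 5 it keeps storing the fetcher class, as before.
      Sidekiq::BaseReliableFetch.setup_reliable_fetch!(config)
    end
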
@@ -72,24 +66,24 @@ module Sidekiq
       end
     end
 
-    def self.hostname
-      Socket.gethostname
-    end
-
-    def self.process_nonce
-      @@process_nonce ||= SecureRandom.hex(6)
+    def self.pid
+      @pid ||= ::Process.pid
     end
 
-    def self.identity
-      @@identity ||= "#{hostname}:#{$$}:#{process_nonce}"
+    def self.hostname
+      @hostname ||= Socket.gethostname
     end
 
     def self.heartbeat
       Sidekiq.redis do |conn|
-        conn.set(heartbeat_key(identity), 1, ex: HEARTBEAT_LIFESPAN)
+        conn.set(heartbeat_key(hostname, pid), 1, ex: HEARTBEAT_LIFESPAN)
       end
 
-      Sidekiq.logger.debug("Heartbeat for #{identity}")
+      Sidekiq.logger.debug("Heartbeat for hostname: #{hostname} and pid: #{pid}")
+    end
+
+    def bulk_requeue(inprogress, options)
+      self.class.bulk_requeue(inprogress, options)
     end
 
     def self.bulk_requeue(inprogress, _options)
@@ -108,7 +102,9 @@ module Sidekiq
       Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{e.message}")
     end
 
-    def self.clean_working_queue!(original_queue, working_queue)
+    def self.clean_working_queue!(working_queue)
+      original_queue = working_queue.gsub(/#{WORKING_QUEUE_PREFIX}:|:[^:]*:[0-9]*\z/, '')
+
       Sidekiq.redis do |conn|
         while job = conn.rpop(working_queue)
           preprocess_interrupted_job(job, original_queue)
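
The gsub added above derives the original queue key from a working-queue key by stripping the working prefix and the trailing hostname/pid segment. A small worked example with made-up values, assuming WORKING_QUEUE_PREFIX is 'working' (as the key example in the next hunk suggests):

    key = 'working:queue:assigned:worker-01:12345' # hostname and pid are illustrative
    key.gsub(/working:|:[^:]*:[0-9]*\z/, '')
    # => "queue:assigned"
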
@@ -127,19 +123,6 @@ module Sidekiq
       end
     end
 
-    def self.extract_queue_and_identity(key)
-      # New identity format is "{hostname}:{pid}:{randomhex}
-      # Old identity format is "{hostname}:{pid}"
-      # Queue names may also have colons (namespaced).
-      # Expressing this in a single regex is unreadable
-
-      # Test the newer expected format first, only checking the older if necessary
-      original_queue, identity = key.scan(WORKING_QUEUE_REGEX).flatten
-      return original_queue, identity unless original_queue.nil? || identity.nil?
-
-      key.scan(LEGACY_WORKING_QUEUE_REGEX).flatten
-    end
-
     # Detect "old" jobs and requeue them because the worker they were assigned
     # to probably failed miserably.
     def self.clean_working_queues!
@@ -147,25 +130,26 @@ module Sidekiq
 
       Sidekiq.redis do |conn|
         conn.scan_each(match: "#{WORKING_QUEUE_PREFIX}:queue:*", count: SCAN_COUNT) do |key|
-          original_queue, identity = extract_queue_and_identity(key)
+          # Example: "working:name_of_the_job:queue:{hostname}:{PID}"
+          hostname, pid = key.scan(/:([^:]*):([0-9]*)\z/).flatten
 
-          next if original_queue.nil? || identity.nil?
+          continue if hostname.nil? || pid.nil?
 
-          clean_working_queue!(original_queue, key) if worker_dead?(identity, conn)
+          clean_working_queue!(key) if worker_dead?(hostname, pid, conn)
         end
       end
     end
 
-    def self.worker_dead?(identity, conn)
-      !conn.get(heartbeat_key(identity))
+    def self.worker_dead?(hostname, pid, conn)
+      !conn.get(heartbeat_key(hostname, pid))
     end
 
-    def self.heartbeat_key(identity)
-      "reliable-fetcher-heartbeat-#{identity.gsub(':', '-')}"
+    def self.heartbeat_key(hostname, pid)
+      "reliable-fetcher-heartbeat-#{hostname}-#{pid}"
     end
 
     def self.working_queue_name(queue)
-      "#{WORKING_QUEUE_PREFIX}:#{queue}:#{identity}"
+      "#{WORKING_QUEUE_PREFIX}:#{queue}:#{hostname}:#{pid}"
     end
 
     def self.interruption_exhausted?(msg)
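
Taken together, the hostname/pid scheme yields flatter Redis keys than the old nonce-based identity. A sketch of the resulting key shapes, assuming host "worker-01", pid 12345 and the 'working' prefix (all illustrative values):

    Sidekiq::BaseReliableFetch.heartbeat_key('worker-01', 12345)
    # => "reliable-fetcher-heartbeat-worker-01-12345"

    # working_queue_name uses the current process's own hostname and pid, so for
    # the 'queue:assigned' queue on that worker the key comes out as:
    #   "working:queue:assigned:worker-01:12345"
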
data/lib/sidekiq/semi_reliable_fetch.rb CHANGED
@@ -5,14 +5,14 @@ module Sidekiq
     # We want the fetch operation to timeout every few seconds so the thread
     # can check if the process is shutting down. This constant is only used
     # for semi-reliable fetch.
-    DEFAULT_SEMI_RELIABLE_FETCH_TIMEOUT = 2 # seconds
+    SEMI_RELIABLE_FETCH_TIMEOUT = 2 # seconds
 
     def initialize(options)
       super
 
       if strictly_ordered_queues
         @queues = @queues.uniq
-        @queues << semi_reliable_fetch_timeout
+        @queues << SEMI_RELIABLE_FETCH_TIMEOUT
       end
     end
 
@@ -36,13 +36,9 @@ module Sidekiq
         @queues
       else
         queues = @queues.shuffle.uniq
-        queues << semi_reliable_fetch_timeout
+        queues << SEMI_RELIABLE_FETCH_TIMEOUT
         queues
       end
     end
-
-    def semi_reliable_fetch_timeout
-      @semi_reliable_fetch_timeout ||= ENV['SIDEKIQ_SEMI_RELIABLE_FETCH_TIMEOUT']&.to_i || DEFAULT_SEMI_RELIABLE_FETCH_TIMEOUT
-    end
   end
 end
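
With the ENV override removed, the semi-reliable fetcher always appends the fixed two-second timeout as the last BRPOP argument; the spec removed further down shows the exact call shape. A sketch for a single 'stuff_to_do' queue (queue name illustrative):

    Sidekiq.redis do |conn|
      conn.brpop('queue:stuff_to_do', 2) # => [queue_key, job_json] or nil on timeout
    end
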
data/spec/base_reliable_fetch_spec.rb CHANGED
@@ -80,14 +80,14 @@ describe Sidekiq::BaseReliableFetch do
   end
 
   it 'sets heartbeat' do
-    config = double(:sidekiq_config, options: {})
+    config = double(:sidekiq_config, options: { queues: [] })
 
     heartbeat_thread = described_class.setup_reliable_fetch!(config)
 
     Sidekiq.redis do |conn|
       sleep 0.2 # Give the time to heartbeat thread to make a loop
 
-      heartbeat_key = described_class.heartbeat_key(described_class.identity)
+      heartbeat_key = described_class.heartbeat_key(Socket.gethostname, ::Process.pid)
       heartbeat = conn.get(heartbeat_key)
 
       expect(heartbeat).not_to be_nil
data/spec/fetch_shared_examples.rb CHANGED
@@ -35,7 +35,6 @@ shared_examples 'a Sidekiq fetcher' do
 
       uow = fetcher.retrieve_work
 
-      expect(uow).to_not be_nil
       expect(uow.job).to eq expected_job
 
       Sidekiq.redis do |conn|
@@ -43,40 +42,6 @@ shared_examples 'a Sidekiq fetcher' do
       end
     end
 
-    it 'ignores working queue keys in unknown formats' do
-      # Add a spurious non-numeric char segment at the end; this simulates any other
-      # incorrect form in general
-      malformed_key = "#{other_process_working_queue_name('assigned')}:X"
-      Sidekiq.redis do |conn|
-        conn.rpush(malformed_key, job)
-      end
-
-      uow = fetcher.retrieve_work
-
-      Sidekiq.redis do |conn|
-        expect(conn.llen(malformed_key)).to eq 1
-      end
-    end
-
-    it 'requeues jobs from legacy dead working queue with incremented interrupted_count' do
-      Sidekiq.redis do |conn|
-        conn.rpush(legacy_other_process_working_queue_name('assigned'), job)
-      end
-
-      expected_job = Sidekiq.load_json(job)
-      expected_job['interrupted_count'] = 1
-      expected_job = Sidekiq.dump_json(expected_job)
-
-      uow = fetcher.retrieve_work
-
-      expect(uow).to_not be_nil
-      expect(uow.job).to eq expected_job
-
-      Sidekiq.redis do |conn|
-        expect(conn.llen(legacy_other_process_working_queue_name('assigned'))).to eq 0
-      end
-    end
-
     it 'does not requeue jobs from live working queue' do
       working_queue = live_other_process_working_queue_name('assigned')
 
@@ -132,104 +97,6 @@ shared_examples 'a Sidekiq fetcher' do
 
       expect(jobs).to include 'this_job_should_not_stuck'
     end
-
-    context 'with namespaced queues' do
-      let (:queue) { 'namespace:assigned' }
-      let (:fetcher) { described_class.new(queues: [queue]) }
-
-      it 'requeues jobs from dead namespaced working queue with incremented interrupted_count' do
-        Sidekiq.redis do |conn|
-          conn.rpush(other_process_working_queue_name(queue), job)
-        end
-
-        expected_job = Sidekiq.load_json(job)
-        expected_job['interrupted_count'] = 1
-        expected_job = Sidekiq.dump_json(expected_job)
-
-        uow = fetcher.retrieve_work
-
-        expect(uow).to_not be_nil
-        expect(uow.job).to eq expected_job
-
-        Sidekiq.redis do |conn|
-          expect(conn.llen(other_process_working_queue_name(queue))).to eq 0
-        end
-      end
-
-      it 'does not requeue jobs in a namespaced queue from live working queue' do
-        working_queue = live_other_process_working_queue_name(queue)
-
-        Sidekiq.redis do |conn|
-          conn.rpush(working_queue, job)
-        end
-
-        uow = fetcher.retrieve_work
-
-        expect(uow).to be_nil
-
-        Sidekiq.redis do |conn|
-          expect(conn.llen(working_queue)).to eq 1
-        end
-      end
-    end
-
-    context 'with deeper namespaced queues' do
-      let (:queue) { 'deep:namespace:assigned' }
-      let (:fetcher) { described_class.new(queues: [queue]) }
-
-      it 'requeues jobs from dead namespaced working queue with incremented interrupted_count' do
-        Sidekiq.redis do |conn|
-          conn.rpush(other_process_working_queue_name(queue), job)
-        end
-
-        expected_job = Sidekiq.load_json(job)
-        expected_job['interrupted_count'] = 1
-        expected_job = Sidekiq.dump_json(expected_job)
-
-        uow = fetcher.retrieve_work
-
-        expect(uow).to_not be_nil
-        expect(uow.job).to eq expected_job
-
-        Sidekiq.redis do |conn|
-          expect(conn.llen(other_process_working_queue_name(queue))).to eq 0
-        end
-      end
-
-      it 'does not requeue jobs in a deeper namespaced queue from live working queue' do
-        working_queue = live_other_process_working_queue_name(queue)
-
-        Sidekiq.redis do |conn|
-          conn.rpush(working_queue, job)
-        end
-
-        uow = fetcher.retrieve_work
-
-        expect(uow).to be_nil
-
-        Sidekiq.redis do |conn|
-          expect(conn.llen(working_queue)).to eq 1
-        end
-      end
-    end
-
-    context 'with short cleanup interval' do
-      let(:short_interval) { 1 }
-      let(:fetcher) { described_class.new(queues: queues, lease_interval: short_interval, cleanup_interval: short_interval) }
-
-      it 'requeues when there is no heartbeat' do
-        Sidekiq.redis { |conn| conn.rpush('queue:assigned', job) }
-        # Use of retrieve_work twice with a sleep ensures we have exercised the
-        # `identity` method to create the working queue key name and that it
-        # matches the patterns used in the cleanup
-        uow = fetcher.retrieve_work
-        sleep(short_interval + 1)
-        uow = fetcher.retrieve_work
-
-        # Will only receive a UnitOfWork if the job was detected as failed and requeued
-        expect(uow).to_not be_nil
-      end
-    end
   end
 end
 
@@ -239,23 +106,17 @@ def working_queue_size(queue_name)
   end
 end
 
-def legacy_other_process_working_queue_name(queue)
-  "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{Socket.gethostname}:#{::Process.pid + 1}"
-end
-
-
 def other_process_working_queue_name(queue)
-  "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{Socket.gethostname}:#{::Process.pid + 1}:#{::SecureRandom.hex(6)}"
+  "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{Socket.gethostname}:#{::Process.pid + 1}"
 end
 
 def live_other_process_working_queue_name(queue)
   pid = ::Process.pid + 1
   hostname = Socket.gethostname
-  nonce = SecureRandom.hex(6)
 
   Sidekiq.redis do |conn|
-    conn.set(Sidekiq::BaseReliableFetch.heartbeat_key("#{hostname}-#{pid}-#{nonce}"), 1)
+    conn.set(Sidekiq::BaseReliableFetch.heartbeat_key(hostname, pid), 1)
   end
 
-  "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{hostname}:#{pid}:#{nonce}"
+  "#{Sidekiq::BaseReliableFetch::WORKING_QUEUE_PREFIX}:queue:#{queue}:#{hostname}:#{pid}"
 end
data/spec/semi_reliable_fetch_spec.rb CHANGED
@@ -5,39 +5,4 @@ require 'sidekiq/semi_reliable_fetch'
 
 describe Sidekiq::SemiReliableFetch do
   include_examples 'a Sidekiq fetcher'
-
-  describe '#retrieve_work' do
-    context 'timeout config' do
-      let(:queues) { ['stuff_to_do'] }
-      let(:fetcher) { described_class.new(queues: queues) }
-
-      before do
-        stub_env('SIDEKIQ_SEMI_RELIABLE_FETCH_TIMEOUT', timeout)
-      end
-
-      context 'when the timeout is not configured' do
-        let(:timeout) { nil }
-
-        it 'brpops with the default timeout timeout' do
-          Sidekiq.redis do |connection|
-            expect(connection).to receive(:brpop).with("queue:stuff_to_do", 2).once.and_call_original
-
-            fetcher.retrieve_work
-          end
-        end
-      end
-
-      context 'when the timeout is set in the env' do
-        let(:timeout) { '5' }
-
-        it 'brpops with the default timeout timeout' do
-          Sidekiq.redis do |connection|
-            expect(connection).to receive(:brpop).with("queue:stuff_to_do", 5).once.and_call_original
-
-            fetcher.retrieve_work
-          end
-        end
-      end
-    end
-  end
 end
data/spec/spec_helper.rb CHANGED
@@ -3,7 +3,6 @@ require 'sidekiq/util'
 require 'sidekiq/api'
 require 'pry'
 require 'simplecov'
-require 'stub_env'
 
 SimpleCov.start
 
@@ -30,7 +29,6 @@ Sidekiq.logger.level = Logger::ERROR
 #
 # See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
 RSpec.configure do |config|
-  config.include StubEnv::Helpers
   # rspec-expectations config goes here. You can use an alternate
   # assertion/expectation library such as wrong or the stdlib/minitest
   # assertions if you prefer.
metadata CHANGED
@@ -1,30 +1,36 @@
 --- !ruby/object:Gem::Specification
 name: gitlab-sidekiq-fetcher
 version: !ruby/object:Gem::Version
-  version: 0.5.6
+  version: 0.6.0
 platform: ruby
 authors:
 - TEA
 - GitLab
-autorequire:
+autorequire:
 bindir: bin
 cert_chain: []
-date: 2021-03-24 00:00:00.000000000 Z
+date: 2020-07-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: sidekiq
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - ">="
       - !ruby/object:Gem::Version
         version: '5'
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '7'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "~>"
+    - - ">="
       - !ruby/object:Gem::Version
         version: '5'
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '7'
 description: Redis reliable queue pattern implemented in Sidekiq
 email: valery@gitlab.com
 executables: []
@@ -63,7 +69,7 @@ homepage: https://gitlab.com/gitlab-org/sidekiq-reliable-fetch/
 licenses:
 - LGPL-3.0
 metadata: {}
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -79,7 +85,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: '0'
 requirements: []
 rubygems_version: 3.0.3
-signing_key:
+signing_key:
 specification_version: 4
 summary: Reliable fetch extension for Sidekiq
 test_files: []