sidekiq-queue-throttled 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +53 -0
- data/LICENSE.txt +21 -0
- data/README.md +303 -0
- data/lib/sidekiq/queue_throttled/configuration.rb +53 -0
- data/lib/sidekiq/queue_throttled/job.rb +82 -0
- data/lib/sidekiq/queue_throttled/job_throttler.rb +155 -0
- data/lib/sidekiq/queue_throttled/middleware.rb +92 -0
- data/lib/sidekiq/queue_throttled/queue_limiter.rb +110 -0
- data/lib/sidekiq/queue_throttled/version.rb +7 -0
- data/lib/sidekiq/queue_throttled.rb +49 -0
- data/spec/examples.txt +110 -0
- data/spec/sidekiq/queue_throttled/configuration_spec.rb +145 -0
- data/spec/sidekiq/queue_throttled/job_spec.rb +181 -0
- data/spec/sidekiq/queue_throttled/job_throttler_spec.rb +365 -0
- data/spec/sidekiq/queue_throttled/middleware_spec.rb +280 -0
- data/spec/sidekiq/queue_throttled/queue_limiter_spec.rb +217 -0
- data/spec/spec_helper.rb +79 -0
- metadata +108 -0
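
As a quick orientation before the diff hunks below, here is a minimal usage sketch assembled only from the calls exercised in the bundled specs (Sidekiq::QueueThrottled.configuration.set_queue_limit, the Sidekiq::QueueThrottled::Job mixin, and sidekiq_throttle with a concurrency limit and key_suffix). The queue name, limit values, and worker class are illustrative, not part of the package.

# Illustrative configuration and worker, based on the API calls that appear
# in the specs shipped with this gem. Names and numbers are examples only.
require 'sidekiq/queue_throttled'

# Cap how many jobs from the "reports" queue may run at once.
Sidekiq::QueueThrottled.configuration.set_queue_limit('reports', 5)

class ReportWorker
  include Sidekiq::Job
  include Sidekiq::QueueThrottled::Job

  # Job-level throttle: at most one concurrent job per user_id.
  sidekiq_throttle(
    concurrency: {
      limit: 1,
      key_suffix: ->(user_id) { user_id }
    }
  )

  def perform(user_id)
    # ... do the work ...
  end
end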
data/spec/sidekiq/queue_throttled/middleware_spec.rb
ADDED
@@ -0,0 +1,280 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Sidekiq::QueueThrottled::Middleware do
  let(:middleware) { described_class.new }
  let(:worker) { double('worker', class: worker_class) }
  let(:worker_class) { double('worker_class', name: 'TestWorker') }
  let(:job) { { 'queue' => 'test_queue', 'args' => [123] } }
  let(:queue) { 'test_queue' }

  before do
    Sidekiq::QueueThrottled.configuration.set_queue_limit('test_queue', 2)
  end

  describe '#call' do
    context 'without queue limits or job throttling' do
      it 'processes job normally' do
        processed = false
        middleware.call(worker, job, queue) do
          processed = true
        end
        expect(processed).to be_truthy
      end
    end

    context 'with queue limits' do
      before do
        Sidekiq::QueueThrottled.configuration.set_queue_limit('test_queue', 1)
      end

      it 'processes job when under limit' do
        processed = false
        middleware.call(worker, job, queue) do
          processed = true
        end
        expect(processed).to be_truthy
      end

      it 'reschedules job when at limit' do
        # First job should process
        processed1 = false
        middleware.call(worker, job, queue) do
          processed1 = true
        end
        expect(processed1).to be_truthy

        # Second job should be rescheduled
        processed2 = false
        middleware.call(worker, job, queue) do
          processed2 = true
        end
        expect(processed2).to be_falsey

        # Check that job was rescheduled
        scheduled_jobs = Sidekiq.redis { |conn| conn.zrange('schedule', 0, -1) }
        expect(scheduled_jobs).not_to be_empty
      end
    end

    context 'with job throttling' do
      let(:worker_class) do
        create_test_job_class('ThrottledWorker') do
          sidekiq_throttle(
            concurrency: {
              limit: 1,
              key_suffix: ->(user_id) { user_id }
            }
          )
        end
      end

      it 'processes job when under throttle limit' do
        processed = false
        middleware.call(worker, job, queue) do
          processed = true
        end
        expect(processed).to be_truthy
      end

      it 'reschedules job when at throttle limit' do
        # First job should process
        processed1 = false
        middleware.call(worker, job, queue) do
          processed1 = true
        end
        expect(processed1).to be_truthy

        # Second job with same user_id should also process (sequential, not concurrent)
        processed2 = false
        middleware.call(worker, job, queue) do
          processed2 = true
        end
        expect(processed2).to be_truthy

        # There should be no scheduled jobs
        scheduled_jobs = Sidekiq.redis { |conn| conn.zrange('schedule', 0, -1) }
        expect(scheduled_jobs).to be_empty
      end

      it 'allows different user_ids' do
        # First job with user_id 123
        processed1 = false
        middleware.call(worker, job, queue) do
          processed1 = true
        end
        expect(processed1).to be_truthy

        # Second job with user_id 456 should process
        job2 = job.merge('args' => [456])
        processed2 = false
        middleware.call(worker, job2, queue) do
          processed2 = true
        end
        expect(processed2).to be_truthy
      end
    end

    context 'with both queue limits and job throttling' do
      let(:worker_class) do
        create_test_job_class('ThrottledWorker') do
          sidekiq_throttle(
            concurrency: {
              limit: 2,
              key_suffix: ->(user_id) { user_id }
            }
          )
        end
      end

      before do
        Sidekiq::QueueThrottled.configuration.set_queue_limit('test_queue', 1)
      end

      it 'respects queue limit first' do
        # First job should process
        processed1 = false
        middleware.call(worker, job, queue) do
          processed1 = true
        end
        expect(processed1).to be_truthy

        # Second job should be rescheduled due to queue limit (not job throttle)
        processed2 = false
        middleware.call(worker, job, queue) do
          processed2 = true
        end
        expect(processed2).to be_falsey
      end
    end

    context 'error handling' do
      it 'releases locks even when job raises error' do
        Sidekiq::QueueThrottled.configuration.set_queue_limit('test_queue', 1)

        expect do
          middleware.call(worker, job, queue) do
            raise 'Job error'
          end
        end.to raise_error('Job error')

        # Reset the limiter to simulate a fresh state for the next job
        limiter = middleware.send(:get_queue_limiter, queue)
        limiter.reset!

        # Should be able to process another job after error
        processed = false
        middleware.call(worker, job, queue) do
          processed = true
        end
        expect(processed).to be_truthy
      end

      it 'releases job throttle slots even when job raises error' do
        worker_class = create_test_job_class('ThrottledWorker') do
          sidekiq_throttle(
            concurrency: {
              limit: 1,
              key_suffix: ->(user_id) { user_id }
            }
          )
        end
        worker = double('worker', class: worker_class)

        expect do
          middleware.call(worker, job, queue) do
            raise 'Job error'
          end
        end.to raise_error('Job error')

        # Should be able to process another job after error
        processed = false
        middleware.call(worker, job, queue) do
          processed = true
        end
        expect(processed).to be_truthy
      end
    end
  end

  describe '#get_queue_limiter' do
    it 'returns nil for queue without limit' do
      limiter = middleware.send(:get_queue_limiter, 'non_existent_queue')
      expect(limiter).to be_nil
    end

    it 'returns limiter for queue with limit' do
      limiter = middleware.send(:get_queue_limiter, 'test_queue')
      expect(limiter).to be_a(Sidekiq::QueueThrottled::QueueLimiter)
      expect(limiter.queue_name).to eq('test_queue')
      expect(limiter.limit).to eq(2)
    end

    it 'caches limiters' do
      limiter1 = middleware.send(:get_queue_limiter, 'test_queue')
      limiter2 = middleware.send(:get_queue_limiter, 'test_queue')
      expect(limiter1).to eq(limiter2)
    end
  end

  describe '#get_job_throttler' do
    it 'returns nil for job without throttle config' do
      throttler = middleware.send(:get_job_throttler, 'TestWorker')
      expect(throttler).to be_nil
    end

    it 'returns throttler for job with throttle config' do
      create_test_job_class('ThrottledWorker') do
        sidekiq_throttle(
          concurrency: {
            limit: 1,
            key_suffix: ->(user_id) { user_id }
          }
        )
      end

      throttler = middleware.send(:get_job_throttler, 'ThrottledWorker')
      expect(throttler).to be_a(Sidekiq::QueueThrottled::JobThrottler)
      expect(throttler.job_class).to eq('ThrottledWorker')
    end

    it 'caches throttlers' do
      create_test_job_class('ThrottledWorker') do
        sidekiq_throttle(
          concurrency: {
            limit: 1,
            key_suffix: ->(user_id) { user_id }
          }
        )
      end

      throttler1 = middleware.send(:get_job_throttler, 'ThrottledWorker')
      throttler2 = middleware.send(:get_job_throttler, 'ThrottledWorker')
      expect(throttler1).to eq(throttler2)
    end
  end

  describe '#reschedule_job' do
    it 'adds job to schedule with delay' do
      middleware.send(:reschedule_job, job, queue)

      scheduled_jobs = Sidekiq.redis { |conn| conn.zrange('schedule', 0, -1) }
      expect(scheduled_jobs).not_to be_empty

      scheduled_job = JSON.parse(scheduled_jobs.first)
      expect(scheduled_job['queue']).to eq(queue)
      expect(scheduled_job['args']).to eq([123])
      expect(scheduled_job['at']).to be > Time.now.to_f
    end

    it 'uses configured retry delay' do
      Sidekiq::QueueThrottled.configuration.retry_delay = 10
      middleware.send(:reschedule_job, job, queue)

      scheduled_jobs = Sidekiq.redis { |conn| conn.zrange('schedule', 0, -1) }
      scheduled_job = JSON.parse(scheduled_jobs.first)
      expect(scheduled_job['at']).to be_within(1).of(Time.now.to_f + 10)
    end
  end
end
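
The spec above drives the middleware directly through #call; in a running application a Sidekiq server middleware like this is normally registered on the server middleware chain. The snippet below is a hedged example using the standard Sidekiq configuration API only; the gem may also register itself automatically, so treat this as an assumption to verify against its README.

# config/initializers/sidekiq.rb (illustrative; the gem may wire this up itself)
require 'sidekiq/queue_throttled'

Sidekiq.configure_server do |config|
  config.server_middleware do |chain|
    # Middleware#call(worker, job, queue) { ... }, as exercised in the spec above
    chain.add Sidekiq::QueueThrottled::Middleware
  end
end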
data/spec/sidekiq/queue_throttled/queue_limiter_spec.rb
ADDED
@@ -0,0 +1,217 @@
# frozen_string_literal: true

require 'spec_helper'

RSpec.describe Sidekiq::QueueThrottled::QueueLimiter do
  let(:queue_name) { 'test_queue' }
  let(:limit) { 3 }
  let(:limiter) { described_class.new(queue_name, limit) }

  describe '#initialize' do
    it 'sets queue name and limit' do
      expect(limiter.queue_name).to eq(queue_name)
      expect(limiter.limit).to eq(limit)
    end

    it 'converts limit to integer' do
      limiter = described_class.new(queue_name, '5')
      expect(limiter.limit).to eq(5)
    end

    it 'uses provided redis connection' do
      custom_redis = double('redis')
      limiter = described_class.new(queue_name, limit, custom_redis)
      expect(limiter.redis).to eq(custom_redis)
    end
  end

  describe '#acquire_lock' do
    it 'acquires lock when under limit' do
      lock_id = limiter.acquire_lock
      expect(lock_id).to be_truthy
      expect(limiter.current_count).to eq(1)
    end

    it 'acquires multiple locks up to limit' do
      lock1 = limiter.acquire_lock
      lock2 = limiter.acquire_lock
      lock3 = limiter.acquire_lock

      expect(lock1).to be_truthy
      expect(lock2).to be_truthy
      expect(lock3).to be_truthy
      expect(limiter.current_count).to eq(3)
    end

    it 'fails to acquire lock when at limit' do
      limiter.acquire_lock
      limiter.acquire_lock
      limiter.acquire_lock

      lock4 = limiter.acquire_lock
      expect(lock4).to be_falsey
      expect(limiter.current_count).to eq(3)
    end

    it 'uses provided worker_id' do
      worker_id = 'worker_123'
      lock_id = limiter.acquire_lock(worker_id)
      expect(lock_id).to include(worker_id)
    end

    it 'generates unique lock_id for each call' do
      lock1 = limiter.acquire_lock
      lock2 = limiter.acquire_lock
      expect(lock1).not_to eq(lock2)
    end
  end

  describe '#release_lock' do
    it 'releases lock and decrements counter' do
      lock_id = limiter.acquire_lock
      expect(limiter.current_count).to eq(1)

      result = limiter.release_lock(lock_id)
      expect(result).to be_truthy
      expect(limiter.current_count).to eq(1)
    end

    it 'handles nil lock_id' do
      result = limiter.release_lock(nil)
      expect(result).to be_falsey
    end

    it 'handles non-existent lock_id' do
      result = limiter.release_lock('non_existent')
      expect(result).to be_truthy
      expect(limiter.current_count).to eq(0)
    end

    it 'handles redis errors gracefully' do
      allow(limiter.redis).to receive(:del).and_raise(Redis::BaseError.new('Connection error'))

      lock_id = limiter.acquire_lock
      result = limiter.release_lock(lock_id)
      expect(result).to be_truthy
    end
  end

  describe '#current_count' do
    it 'returns 0 for new limiter' do
      expect(limiter.current_count).to eq(0)
    end

    it 'returns correct count after acquiring locks' do
      limiter.acquire_lock
      limiter.acquire_lock
      expect(limiter.current_count).to eq(2)
    end

    it 'returns correct count after releasing locks' do
      lock1 = limiter.acquire_lock
      limiter.acquire_lock
      limiter.release_lock(lock1)
      expect(limiter.current_count).to eq(2)
    end
  end

  describe '#available_slots' do
    it 'returns limit for new limiter' do
      expect(limiter.available_slots).to eq(3)
    end

    it 'returns correct available slots after acquiring locks' do
      limiter.acquire_lock
      expect(limiter.available_slots).to eq(2)
    end

    it 'returns 0 when at limit' do
      limiter.acquire_lock
      limiter.acquire_lock
      limiter.acquire_lock
      expect(limiter.available_slots).to eq(0)
    end

    it 'never returns negative values' do
      # Simulate a situation where counter might be higher than limit
      allow(limiter.redis).to receive(:get).and_return('5')
      expect(limiter.available_slots).to eq(0)
    end
  end

  describe '#reset!' do
    it 'resets counter and lock' do
      limiter.acquire_lock
      expect(limiter.current_count).to eq(1)

      limiter.reset!
      expect(limiter.current_count).to eq(0)
    end

    it 'allows acquiring locks after reset' do
      limiter.acquire_lock
      limiter.acquire_lock
      limiter.acquire_lock
      expect(limiter.current_count).to eq(3)

      limiter.reset!
      lock_id = limiter.acquire_lock
      expect(lock_id).to be_truthy
      expect(limiter.current_count).to eq(1)
    end
  end

  describe 'concurrent access' do
    it 'handles concurrent lock acquisitions' do
      threads = []
      lock_ids = []

      5.times do
        threads << Thread.new do
          lock_id = limiter.acquire_lock
          lock_ids << lock_id if lock_id
        end
      end

      threads.each(&:join)

      # Should only acquire 3 locks (the limit)
      acquired_locks = lock_ids.compact
      expect(acquired_locks.length).to eq(3)
      expect(limiter.current_count).to eq(3)
    end

    it 'handles concurrent releases' do
      lock_ids = []
      3.times { lock_ids << limiter.acquire_lock }

      threads = lock_ids.map do |lock_id|
        Thread.new do
          limiter.release_lock(lock_id)
        end
      end

      threads.each(&:join)
      # Counter should remain at 3 since we're using time-based limiting
      expect(limiter.current_count).to eq(3)
    end
  end

  describe 'redis key management' do
    it 'uses correct redis key prefix' do
      limiter.acquire_lock

      # Check that the counter key exists with correct prefix
      keys = limiter.redis.keys("sidekiq:queue_throttled:queue:#{queue_name}:*")
      expect(keys).not_to be_empty
    end

    it 'sets TTL on counter keys' do
      limiter.acquire_lock

      counter_key = "#{Sidekiq::QueueThrottled.configuration.redis_key_prefix}:queue:#{queue_name}:counter"
      ttl = limiter.redis.ttl(counter_key)
      expect(ttl).to be > 0
    end
  end
end
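
Outside the middleware, the QueueLimiter API exercised above (acquire_lock, release_lock, current_count, available_slots, reset!) can also be driven directly. A minimal sketch follows, using the same constructor arguments the spec uses; the queue name, limit, and the work being wrapped are illustrative.

# Illustrative direct use of the QueueLimiter API shown in the spec above.
limiter = Sidekiq::QueueThrottled::QueueLimiter.new('reports', 3)

lock_id = limiter.acquire_lock       # falsey once all 3 slots are taken
if lock_id
  begin
    # ... perform work that should count against the queue limit ...
  ensure
    limiter.release_lock(lock_id)    # always hand the slot back
  end
else
  puts "reports queue is at capacity (#{limiter.available_slots} slots free)"
end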
data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,79 @@
# frozen_string_literal: true

require 'bundler/setup'
require 'sidekiq/queue_throttled'
require 'rspec'
require 'timecop'
require 'pry'

# Set up real Redis with password for tests
redis_url = ENV['REDIS_URL'] || 'redis://:123123@localhost:6379/0'

# Configure Sidekiq to use Redis with password
Sidekiq.configure_client { |c| c.redis = { url: redis_url } }
Sidekiq.configure_server { |c| c.redis = { url: redis_url } }
Sidekiq::QueueThrottled.redis = Redis.new(url: redis_url)

# Configure RSpec
RSpec.configure do |config|
  config.expect_with :rspec do |expectations|
    expectations.include_chain_clauses_in_custom_matcher_descriptions = true
  end

  config.mock_with :rspec do |mocks|
    mocks.verify_partial_doubles = true
  end

  config.shared_context_metadata_behavior = :apply_to_host_groups
  config.filter_run_when_matching :focus
  config.example_status_persistence_file_path = 'spec/examples.txt'
  config.disable_monkey_patching!
  config.warnings = true

  config.order = :random
  Kernel.srand config.seed

  # Reset Redis before each test
  config.before(:each) do
    Sidekiq::QueueThrottled.redis.flushdb
    Sidekiq::QueueThrottled.configuration = Sidekiq::QueueThrottled::Configuration.new
  end
end

# Helper methods for testing
module TestHelpers
  def create_test_job_class(name = 'TestJob', &block)
    klass = Class.new do
      include Sidekiq::Job
      include Sidekiq::QueueThrottled::Job

      define_method(:perform) do |*args|
        # Default implementation
      end

      class_eval(&block) if block_given?
    end

    # Set the class name for better error messages and constant lookup
    klass.define_singleton_method(:name) { name }

    # Store the class in a constant so it can be looked up
    Object.const_set(name, klass) unless Object.const_defined?(name)

    klass
  end

  def wait_for_condition(timeout = 5, &condition)
    start_time = Time.now
    while Time.now - start_time < timeout
      return true if condition.call

      sleep 0.1
    end
    false
  end
end

RSpec.configure do |config|
  config.include TestHelpers
end
metadata
ADDED
@@ -0,0 +1,108 @@
--- !ruby/object:Gem::Specification
name: sidekiq-queue-throttled
version: !ruby/object:Gem::Version
  version: 1.0.0
platform: ruby
authors:
- Farid Mohammadi
autorequire:
bindir: bin
cert_chain: []
date: 2025-06-30 00:00:00.000000000 Z
dependencies:
- !ruby/object:Gem::Dependency
  name: concurrent-ruby
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.1'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '1.1'
- !ruby/object:Gem::Dependency
  name: redis
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '4.0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '4.0'
- !ruby/object:Gem::Dependency
  name: sidekiq
  requirement: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '6.0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - "~>"
      - !ruby/object:Gem::Version
        version: '6.0'
description: A production-ready Sidekiq gem that provides both queue-level concurrency
  limits and job-level throttling capabilities, combining the best of sidekiq-limit_fetch
  and sidekiq-throttled.
email:
- farid.workspace@gmail.com
executables: []
extensions: []
extra_rdoc_files: []
files:
- CHANGELOG.md
- LICENSE.txt
- README.md
- lib/sidekiq/queue_throttled.rb
- lib/sidekiq/queue_throttled/configuration.rb
- lib/sidekiq/queue_throttled/job.rb
- lib/sidekiq/queue_throttled/job_throttler.rb
- lib/sidekiq/queue_throttled/middleware.rb
- lib/sidekiq/queue_throttled/queue_limiter.rb
- lib/sidekiq/queue_throttled/version.rb
- spec/examples.txt
- spec/sidekiq/queue_throttled/configuration_spec.rb
- spec/sidekiq/queue_throttled/job_spec.rb
- spec/sidekiq/queue_throttled/job_throttler_spec.rb
- spec/sidekiq/queue_throttled/middleware_spec.rb
- spec/sidekiq/queue_throttled/queue_limiter_spec.rb
- spec/spec_helper.rb
homepage: https://github.com/faridmohammadi/sidekiq-queue-throttled
licenses:
- MIT
metadata:
  homepage_uri: https://github.com/faridmohammadi/sidekiq-queue-throttled
  source_code_uri: https://github.com/faridmohammadi/sidekiq-queue-throttled
  changelog_uri: https://github.com/faridmohammadi/sidekiq-queue-throttled/blob/main/CHANGELOG.md
  rubygems_mfa_required: 'true'
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: 2.7.0
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubygems_version: 3.5.11
signing_key:
specification_version: 4
summary: Sidekiq gem that combines queue-level limits with job-level throttling
test_files: []
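
Going by the gemspec metadata above (runtime dependencies on sidekiq ~> 6.0, redis ~> 4.0, and concurrent-ruby ~> 1.1; Ruby >= 2.7), installation in an application is the usual Gemfile entry. The version constraint shown is just one reasonable choice, not a requirement stated by the package.

# Gemfile
gem 'sidekiq', '~> 6.0'
gem 'sidekiq-queue-throttled', '~> 1.0'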