kubra-sidekiq-throttled 1.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE.txt +23 -0
- data/README.adoc +416 -0
- data/lib/sidekiq/throttled/config.rb +66 -0
- data/lib/sidekiq/throttled/cooldown.rb +55 -0
- data/lib/sidekiq/throttled/errors.rb +8 -0
- data/lib/sidekiq/throttled/expirable_set.rb +70 -0
- data/lib/sidekiq/throttled/job.rb +143 -0
- data/lib/sidekiq/throttled/message.rb +32 -0
- data/lib/sidekiq/throttled/middlewares/server.rb +28 -0
- data/lib/sidekiq/throttled/patches/basic_fetch.rb +34 -0
- data/lib/sidekiq/throttled/patches/super_fetch.rb +39 -0
- data/lib/sidekiq/throttled/patches/throttled_retriever.rb +26 -0
- data/lib/sidekiq/throttled/registry.rb +120 -0
- data/lib/sidekiq/throttled/strategy/base.rb +25 -0
- data/lib/sidekiq/throttled/strategy/concurrency.lua +61 -0
- data/lib/sidekiq/throttled/strategy/concurrency.rb +127 -0
- data/lib/sidekiq/throttled/strategy/threshold.lua +14 -0
- data/lib/sidekiq/throttled/strategy/threshold.rb +104 -0
- data/lib/sidekiq/throttled/strategy.rb +213 -0
- data/lib/sidekiq/throttled/strategy_collection.rb +73 -0
- data/lib/sidekiq/throttled/version.rb +8 -0
- data/lib/sidekiq/throttled/web/stats.rb +75 -0
- data/lib/sidekiq/throttled/web/throttled.html.erb +35 -0
- data/lib/sidekiq/throttled/web.rb +43 -0
- data/lib/sidekiq/throttled/worker.rb +13 -0
- data/lib/sidekiq/throttled.rb +116 -0
- metadata +119 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: 0c8b57e15f1580876256ff151b057c188cea5816b035add8a9ee758031c54b79
  data.tar.gz: 0fdcbdf93cf808e475f7a01c41658edc08d7e9e256eb7581d793fb72a87a4cf0
SHA512:
  metadata.gz: 4a59f04af06b3a5c97f331fe4963d0ab769e5496a04c47bb139290b1ee0b9f8b8e183bd0c5b32a1b03e1115bc04332a13cfb0cb496b51b8a21d8c503e84e95c4
  data.tar.gz: 7860e9a98c711808b662121e219991636225e39708cbc7e81045211d172723a8fe61474deba3a2116465363d715cc9237aa4a714d86ccbed6f6f5c1f33a07e65
data/LICENSE.txt
ADDED
@@ -0,0 +1,23 @@
The MIT License (MIT)

Copyright (c) 2022 Alexey Zapparov
Copyright (c) 2020-2021 Alexey Zapparov, SensorTower Inc.
Copyright (c) 2015-2020 SensorTower Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
data/README.adoc
ADDED
@@ -0,0 +1,416 @@
= Sidekiq::Throttled
:ci-link: https://github.com/ixti/sidekiq-throttled/actions/workflows/ci.yml
:ci-badge: https://img.shields.io/github/actions/workflow/status/ixti/sidekiq-throttled/ci.yml?branch=main&style=for-the-badge
:gem-link: http://rubygems.org/gems/sidekiq-throttled
:gem-badge: https://img.shields.io/gem/v/sidekiq-throttled?style=for-the-badge
:doc-link: http://www.rubydoc.info/gems/sidekiq-throttled
:doc-badge: https://img.shields.io/badge/Documentation-API-blue?style=for-the-badge

****
{ci-link}[image:{ci-badge}[CI Status]]
{gem-link}[image:{gem-badge}[Latest Version]]
{doc-link}[image:{doc-badge}[API Documentation]]
****

Concurrency and threshold throttling for https://github.com/sidekiq/sidekiq[Sidekiq].

== Installation

Add this line to your application's Gemfile:

[source,ruby]
----
gem "sidekiq-throttled"
----

And then execute:

    $ bundle

Or install it yourself as:

    $ gem install sidekiq-throttled

== Usage

Add somewhere in your app's bootstrap (e.g. `config/initializers/sidekiq.rb` if
you are using Rails):

[source,ruby]
----
require "sidekiq/throttled"
----

Once you've done that, you can include `Sidekiq::Throttled::Job` into your
job classes and configure throttling:

[source,ruby]
----
class MyJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  sidekiq_options :queue => :my_queue

  sidekiq_throttle(
    # Allow maximum 10 concurrent jobs of this class at a time.
    concurrency: { limit: 10 },
    # Allow maximum 1K jobs being processed within one hour window.
    threshold: { limit: 1_000, period: 1.hour }
  )

  def perform
    # ...
  end
end
----

TIP: `Sidekiq::Throttled::Job` is aliased as `Sidekiq::Throttled::Worker`,
thus if you're using the `Sidekiq::Worker` naming convention, you can use the
alias for consistency:

[source,ruby]
----
class MyWorker
  include Sidekiq::Worker
  include Sidekiq::Throttled::Worker

  # ...
end
----


=== Web UI

To add a Throttled tab to your Sidekiq web dashboard, require it during your
application initialization.

[source,ruby]
----
require "sidekiq/throttled/web"
----

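For example, in a Rails application this typically sits next to the regular
Sidekiq web UI setup (a sketch; the mount path and any authentication are up
to your app):

[source,ruby]
----
# config/routes.rb
require "sidekiq/web"
require "sidekiq/throttled/web"

Rails.application.routes.draw do
  mount Sidekiq::Web => "/sidekiq"
end
----
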
=== Configuration

[source,ruby]
----
Sidekiq::Throttled.configure do |config|
  # Period in seconds to exclude queue from polling in case it returned
  # {config.cooldown_threshold} amount of throttled jobs in a row. Set
  # this value to `nil` to disable cooldown manager completely.
  # Default: 1.0
  config.cooldown_period = 1.0

  # Exclude queue from polling after it returned given amount of throttled
  # jobs in a row.
  # Default: 100 (cooldown after hundredth throttled job in a row)
  config.cooldown_threshold = 100
end
----

[WARNING]
.Cooldown Settings
====
If a queue contains a thousand jobs in a row that will be throttled, the
cooldown will kick in 10 times in a row (given the default threshold of 100),
meaning it will take 10 seconds before all those jobs are put back at the end
of the queue and you actually start processing other jobs.

You may want to adjust `cooldown_threshold` and `cooldown_period`,
keeping in mind that this will also impact the load on your Redis server.
====

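As a rough rule of thumb, the total pause caused by a backlog of throttled
jobs is `(jobs / cooldown_threshold) * cooldown_period`. A back-of-the-envelope
sketch (not the fetcher's exact behaviour):

[source,ruby]
----
throttled_jobs_in_a_row = 1_000
cooldown_threshold      = 100   # default
cooldown_period         = 1.0   # default, seconds

(throttled_jobs_in_a_row / cooldown_threshold) * cooldown_period # => 10.0
----
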
==== Middleware(s)

`Sidekiq::Throttled` relies on the following bundled middleware:

* `Sidekiq::Throttled::Middlewares::Server`

The middleware is automatically injected when you require `sidekiq/throttled`.
In the rare case that this causes an issue, you can change the middleware order manually:

[source,ruby]
----
Sidekiq.configure_server do |config|
  # ...

  config.server_middleware do |chain|
    chain.prepend(Sidekiq::Throttled::Middlewares::Server)
  end
end
----

See: https://github.com/sidekiq/sidekiq/blob/main/lib/sidekiq/middleware/chain.rb


=== Observer

You can specify an observer that will be called on throttling. To do so, pass an
`:observer` option with a callable object:

[source,ruby]
----
class MyJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  MY_OBSERVER = lambda do |strategy, *args|
    # do something
  end

  sidekiq_options queue: :my_queue

  sidekiq_throttle(
    concurrency: { limit: 10 },
    threshold: { limit: 100, period: 1.hour },
    observer: MY_OBSERVER
  )

  def perform(*args)
    # ...
  end
end
----

The observer receives `strategy, *args` arguments, where `strategy` is a Symbol
(`:concurrency` or `:threshold`) and `*args` are the arguments that were passed
to the job.

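For instance, an observer can be used to log every throttled hit. A minimal
sketch using the standard `Sidekiq.logger`, plugged in via the `:observer`
option shown above:

[source,ruby]
----
LOG_THROTTLE = lambda do |strategy, *args|
  # strategy is :concurrency or :threshold; args are the job's arguments
  Sidekiq.logger.warn("MyJob throttled by #{strategy}, args=#{args.inspect}")
end
----
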
=== Dynamic throttling

You can throttle jobs dynamically with the `:key_suffix` option:

[source,ruby]
----
class MyJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  sidekiq_options queue: :my_queue

  sidekiq_throttle(
    # Allow maximum 10 concurrent jobs per user at a time.
    concurrency: { limit: 10, key_suffix: -> (user_id) { user_id } }
  )

  def perform(user_id)
    # ...
  end
end
----

You can also make limits and periods dynamic by supplying a proc for these
values. The proc will be evaluated at the time the job is fetched and will
receive the same arguments that are passed to the job.

[source,ruby]
----
class MyJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  sidekiq_options queue: :my_queue

  sidekiq_throttle(
    # Allow maximum 1000 concurrent jobs of this class at a time for VIPs and 10 for all other users.
    concurrency: {
      limit: ->(user_id) { User.vip?(user_id) ? 1_000 : 10 },
      key_suffix: ->(user_id) { User.vip?(user_id) ? "vip" : "std" }
    },
    # Allow 1000 jobs/hour to be processed for VIPs and 10/day for all others
    threshold: {
      limit: ->(user_id) { User.vip?(user_id) ? 1_000 : 10 },
      period: ->(user_id) { User.vip?(user_id) ? 1.hour : 1.day },
      key_suffix: ->(user_id) { User.vip?(user_id) ? "vip" : "std" }
    }
  )

  def perform(user_id)
    # ...
  end
end
----

You can also use several different keys to throttle one worker:

[source,ruby]
----
class MyJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  sidekiq_options queue: :my_queue

  sidekiq_throttle(
    # Allow maximum 10 concurrent jobs per project at a time and maximum 2 jobs per user
    concurrency: [
      { limit: 10, key_suffix: -> (project_id, user_id) { project_id } },
      { limit: 2, key_suffix: -> (project_id, user_id) { user_id } }
    ]
    # For :threshold it works the same
  )

  def perform(project_id, user_id)
    # ...
  end
end
----

IMPORTANT: Don't forget to specify `:key_suffix` and make it return different
values if you are using dynamic limit/period options. Otherwise, you risk
getting into some trouble.

[source,ruby]
----
class MyJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  sidekiq_options queue: :my_queue

  sidekiq_throttle(
    concurrency: { limit: 10 },
    # Allow 500 jobs per minute, 5,000 per hour, and 50,000 per day:
    threshold: [
      { limit: 500, period: 1.minute, key_suffix: "minutely" },
      { limit: 5_000, period: 1.hour, key_suffix: "hourly" },
      { limit: 50_000, period: 1.day, key_suffix: "daily" },
    ]
  )

  def perform(project_id, user_id)
    # ...
  end
end
----

NOTE: `key_suffix` does not have to be a proc/lambda; it can just be a
string value. This can come in handy to set throttle limits for different
ranges of time.

=== Concurrency throttling fine-tuning

Concurrency throttling is based on distributed locks. Those locks have a default
time to live (TTL) of 15 minutes. If your job takes more than 15 minutes to
finish, the lock will be released and you might end up with more jobs running
concurrently than you expect.

This is done to avoid deadlocks: if for any reason (e.g. the Sidekiq process was
OOM-killed) the cleanup middleware wasn't executed, locks would otherwise never
be released.

If your job takes more than 15 minutes to complete, you can tune the concurrency
lock TTL to fit your needs:

[source,ruby]
----
# Set concurrency strategy lock TTL to 1 hour.
sidekiq_throttle(concurrency: { limit: 20, ttl: 1.hour.to_i })
----

=== Scheduling based concurrency tuning

The default concurrency throttling algorithm immediately requeues throttled
jobs. This can lead to a lot of wasted work picking up the same set of still
throttled jobs repeatedly. This churn also often starves lower priority
jobs/queues. The `:schedule` requeue strategy delays re-checking throttled jobs
until they are likely to be runnable. This future time is estimated based on
the expected runtime of the job and the current number of throttled jobs. This
eliminates -- or greatly reduces -- the negative impact on non-throttled job
types and queues, and reduces the wasted work of constantly rechecking the same
still throttled jobs.

Config items:

* `limit` - max number of this job to run simultaneously
* `avg_job_duration` - expected runtime in seconds of this type of job. Pick a
  value on the high side of plausible. Under heavy load, values less than the
  actual average will lead to sub-optimal delays in job processing.
* `lost_job_threshold` - duration in seconds of a job's lease on its concurrency slot
* `ttl` - alias for `lost_job_threshold`

[source,ruby]
----
sidekiq_throttle(
  concurrency: {
    # only run 10 of this job at a time
    limit: 10,

    # these jobs finish in less than 30 seconds
    avg_job_duration: 30,

    # if it doesn't release its lease in 2 minutes, it's never going to
    lost_job_threshold: 120
  },
  requeue: { with: :schedule }
)
----

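The gem computes the retry time internally, but the intuition behind the
estimate can be sketched like this (illustration only; the actual formula lives
inside the concurrency strategy and may differ):

[source,ruby]
----
limit            = 10   # concurrent slots
avg_job_duration = 30   # seconds per job
jobs_ahead       = 120  # throttled jobs currently waiting

# Each "wave" of `limit` jobs takes roughly one avg_job_duration,
# so a freshly throttled job is unlikely to run before:
(jobs_ahead / limit.to_f).ceil * avg_job_duration # => 360 seconds
----
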
== Supported Ruby Versions

This library aims to support and is tested against the following Ruby versions:

* Ruby 2.7.x
* Ruby 3.0.x
* Ruby 3.1.x
* Ruby 3.2.x
* Ruby 3.3.x

If something doesn't work on one of these versions, it's a bug.

This library may inadvertently work (or seem to work) on other Ruby versions;
however, support will only be provided for the versions listed above.

If you would like this library to support another Ruby version or
implementation, you may volunteer to be a maintainer. Being a maintainer
entails making sure all tests run and pass on that implementation. When
something breaks on your implementation, you will be responsible for providing
patches in a timely fashion. If critical issues for a particular implementation
exist at the time of a major release, support for that Ruby version may be
dropped.


== Supported Sidekiq Versions

This library aims to support and work with the following Sidekiq versions:

* Sidekiq 7.0.x
* Sidekiq 7.1.x
* Sidekiq 7.2.x

And the following Sidekiq Pro versions:

* Sidekiq Pro 7.0.x
* Sidekiq Pro 7.1.x
* Sidekiq Pro 7.2.x

== Development

    bundle install
    bundle exec appraisal generate
    bundle exec appraisal install
    bundle exec rake

=== Sidekiq-Pro

If you're working on Sidekiq-Pro support, make sure that you have a Sidekiq-Pro
license set either in the global Bundler config or in the `BUNDLE_GEMS\__CONTRIBSYS__COM`
environment variable.

== Contributing

* Fork sidekiq-throttled on GitHub
* Make your changes
* Ensure all tests pass (`bundle exec rake`)
* Send a pull request
* If we like them we'll merge them
* If we've accepted a patch, feel free to ask for commit access!


== Endorsement

https://github.com/sensortower[image:sensortower.svg[SensorTower]]

The initial work on the project was undertaken to address the needs of
https://github.com/sensortower[SensorTower].
data/lib/sidekiq/throttled/config.rb
ADDED
@@ -0,0 +1,66 @@
# frozen_string_literal: true

module Sidekiq
  module Throttled
    # Configuration object.
    class Config
      # Period in seconds to exclude queue from polling in case it returned
      # {#cooldown_threshold} amount of throttled jobs in a row.
      #
      # Set this to `nil` to disable cooldown completely.
      #
      # @return [Float, nil]
      attr_reader :cooldown_period

      # Amount of throttled jobs returned from the queue subsequently after
      # which queue will be excluded from polling for the durations of
      # {#cooldown_period}.
      #
      # @return [Integer]
      attr_reader :cooldown_threshold

      # Specifies how we should return throttled jobs to the queue so they can be executed later.
      # Expects a hash with keys that may include :with and :to
      # For :with, options are `:enqueue` (put them on the end of the queue) and `:schedule` (schedule for later).
      # For :to, the name of a sidekiq queue should be specified. If none is specified, jobs will by default be
      # requeued to the same queue they were originally enqueued in.
      # Default: {with: `:enqueue`}
      #
      # @return [Hash]
      attr_reader :default_requeue_options

      def initialize
        reset!
      end

      # @!attribute [w] cooldown_period
      def cooldown_period=(value)
        raise TypeError, "unexpected type #{value.class}" unless value.nil? || value.is_a?(Float)
        raise ArgumentError, "period must be positive" unless value.nil? || value.positive?

        @cooldown_period = value
      end

      # @!attribute [w] cooldown_threshold
      def cooldown_threshold=(value)
        raise TypeError, "unexpected type #{value.class}" unless value.is_a?(Integer)
        raise ArgumentError, "threshold must be positive" unless value.positive?

        @cooldown_threshold = value
      end

      # @!attribute [w] default_requeue_options
      def default_requeue_options=(options)
        requeue_with = options.delete(:with).intern || :enqueue

        @default_requeue_options = options.merge({ with: requeue_with })
      end

      def reset!
        @cooldown_period = 1.0
        @cooldown_threshold = 100
        @default_requeue_options = { with: :enqueue }
      end
    end
  end
end
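A minimal usage sketch for the writer above (illustration, not part of the
package; note that `default_requeue_options=` calls `.intern` on the `:with`
value, so a String is the safe thing to pass, and the `:throttled` queue name
here is made up):

Sidekiq::Throttled.configure do |config|
  # Schedule throttled jobs for later instead of pushing them back onto
  # the tail of their queue, and send them to a dedicated queue.
  config.default_requeue_options = { with: "schedule", to: :throttled }
end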
data/lib/sidekiq/throttled/cooldown.rb
ADDED
@@ -0,0 +1,55 @@
# frozen_string_literal: true

require "concurrent"

require_relative "./expirable_set"

module Sidekiq
  module Throttled
    # @api internal
    #
    # Queues cooldown manager. Tracks list of queues that should be temporarily
    # (for the duration of {Config#cooldown_period}) excluded from polling.
    class Cooldown
      class << self
        # Returns new {Cooldown} instance if {Config#cooldown_period} is not `nil`.
        #
        # @param config [Config]
        # @return [Cooldown, nil]
        def [](config)
          new(config) if config.cooldown_period
        end
      end

      # @param config [Config]
      def initialize(config)
        @queues = ExpirableSet.new(config.cooldown_period)
        @threshold = config.cooldown_threshold
        @tracker = Concurrent::Map.new
      end

      # Notify that given queue returned job that was throttled.
      #
      # @param queue [String]
      # @return [void]
      def notify_throttled(queue)
        @queues.add(queue) if @threshold <= @tracker.merge_pair(queue, 1, &:succ)
      end

      # Notify that given queue returned job that was not throttled.
      #
      # @param queue [String]
      # @return [void]
      def notify_admitted(queue)
        @tracker.delete(queue)
      end

      # List of queues that should not be polled
      #
      # @return [Array<String>]
      def queues
        @queues.to_a
      end
    end
  end
end
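Roughly how the fetch patches are expected to drive this manager (illustration
only; `Cooldown` is internal API and the defaults come from `Config` above):

config   = Sidekiq::Throttled::Config.new
cooldown = Sidekiq::Throttled::Cooldown[config]  # nil if cooldown_period is nil

100.times { cooldown.notify_throttled("default") }
cooldown.queues                      # => ["default"], excluded from polling for ~1 second
cooldown.notify_admitted("default")  # resets the in-a-row counter for that queue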
data/lib/sidekiq/throttled/expirable_set.rb
ADDED
@@ -0,0 +1,70 @@
# frozen_string_literal: true

require "concurrent"

module Sidekiq
  module Throttled
    # @api internal
    #
    # Set of elements with expirations.
    #
    # @example
    #   set = ExpirableSet.new(10.0)
    #   set.add("a")
    #   sleep(5)
    #   set.add("b")
    #   set.to_a # => ["a", "b"]
    #   sleep(5)
    #   set.to_a # => ["b"]
    class ExpirableSet
      include Enumerable

      # @param ttl [Float] expiration is seconds
      # @raise [ArgumentError] if `ttl` is not positive Float
      def initialize(ttl)
        raise ArgumentError, "ttl must be positive Float" unless ttl.is_a?(Float) && ttl.positive?

        @elements = Concurrent::Map.new
        @ttl = ttl
      end

      # @param element [Object]
      # @return [ExpirableSet] self
      def add(element)
        # cleanup expired elements to avoid mem-leak
        horizon = now
        expired = @elements.each_pair.select { |(_, sunset)| expired?(sunset, horizon) }
        expired.each { |pair| @elements.delete_pair(*pair) }

        # add new element
        @elements[element] = now + @ttl

        self
      end

      # @yield [Object] Gives each live (not expired) element to the block
      def each
        return to_enum __method__ unless block_given?

        horizon = now

        @elements.each_pair do |element, sunset|
          yield element unless expired?(sunset, horizon)
        end

        self
      end

      private

      # @return [Float]
      def now
        ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      end

      def expired?(sunset, horizon)
        sunset <= horizon
      end
    end
  end
end
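The class-level @example above can be reproduced with shorter timings
(illustration only; assumes the gem and its `concurrent-ruby` dependency are
on the load path):

require "sidekiq/throttled/expirable_set"

set = Sidekiq::Throttled::ExpirableSet.new(0.2)
set.add("a")
sleep 0.1
set.add("b")
set.to_a # => ["a", "b"]
sleep 0.15
set.to_a # => ["b"]  ("a" has outlived its 0.2s TTL)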