sidekiq-throttled 1.4.0 → 1.5.0
- checksums.yaml +4 -4
- data/README.adoc +26 -6
- data/lib/sidekiq/throttled/config.rb +24 -2
- data/lib/sidekiq/throttled/job.rb +26 -2
- data/lib/sidekiq/throttled/patches/basic_fetch.rb +0 -11
- data/lib/sidekiq/throttled/patches/super_fetch.rb +0 -13
- data/lib/sidekiq/throttled/patches/throttled_retriever.rb +1 -1
- data/lib/sidekiq/throttled/strategy/concurrency.rb +10 -0
- data/lib/sidekiq/throttled/strategy/threshold.rb +19 -0
- data/lib/sidekiq/throttled/strategy.rb +96 -5
- data/lib/sidekiq/throttled/strategy_collection.rb +5 -0
- data/lib/sidekiq/throttled/version.rb +1 -1
- data/lib/sidekiq/throttled.rb +14 -8
- metadata +6 -6
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 9d5819912372e26634558752740bc154c4d22899b42925740b86152433744740
+  data.tar.gz: dadcea04012b95e0349728c573a3253d86bae7920ee5844b1df9fb80933c2267
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: cf81c1a319d6f4af5c5a06f5df95dd99ad44a405f152cfda5b4c6123d4692b93d23137ab12422d6b83c82090dc422b15b441f34ed43dc1868933d54354c1326d
+  data.tar.gz: f72803ff9a84bb3cc729869dc1105dcc9f9581c7c4c59eb2bb8a28594a7fe0df49e9b064dbae2c1f46020790637769196f98b5b38a0f77f2cb858f1b6d928679
data/README.adoc
CHANGED
@@ -31,7 +31,6 @@ Or install it yourself as:
 
     $ gem install sidekiq-throttled
 
-
 == Usage
 
 Add somewhere in your app's bootstrap (e.g. `config/initializers/sidekiq.rb` if
@@ -81,6 +80,17 @@ end
 ----
 
 
+=== Web UI
+
+To add a Throttled tab to your Sidekiq web dashboard, require it during your
+application initialization.
+
+[source,ruby]
+----
+require "sidekiq/throttled/web"
+----
+
+
 === Configuration
 
 [source,ruby]
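For context, the new Web UI require typically sits next to the standard Sidekiq::Web setup in a Rails app; the routing part below is plain Sidekiq, shown only for orientation, and is not part of this diff.

[source,ruby]
----
# config/initializers/sidekiq.rb
require "sidekiq/throttled/web" # adds the "Throttled" tab to Sidekiq::Web

# config/routes.rb (standard Sidekiq::Web mounting, shown for context)
require "sidekiq/web"

Rails.application.routes.draw do
  mount Sidekiq::Web => "/sidekiq"
end
----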
@@ -89,16 +99,27 @@ Sidekiq::Throttled.configure do |config|
   # Period in seconds to exclude queue from polling in case it returned
   # {config.cooldown_threshold} amount of throttled jobs in a row. Set
   # this value to `nil` to disable cooldown manager completely.
-  # Default:
-  config.cooldown_period =
+  # Default: 1.0
+  config.cooldown_period = 1.0
 
   # Exclude queue from polling after it returned given amount of throttled
   # jobs in a row.
-  # Default:
-  config.cooldown_threshold =
+  # Default: 100 (cooldown after hundredth throttled job in a row)
+  config.cooldown_threshold = 100
 end
 ----
 
+[WARNING]
+.Cooldown Settings
+====
+If a queue contains a thousand jobs in a row that will be throttled,
+the cooldown will kick in 10 times in a row, meaning it will take 10 seconds
+before all those jobs are put back at the end of the queue and you actually
+start processing other jobs.
+
+You may want to adjust the cooldown_threshold and cooldown_period,
+keeping in mind that this will also impact the load on your Redis server.
+====
 
 ==== Middleware(s)
 
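As the comment above notes, the cooldown manager can also be switched off entirely; a minimal sketch of such an initializer, keeping everything else at its defaults:

[source,ruby]
----
# config/initializers/sidekiq.rb
require "sidekiq/throttled"

Sidekiq::Throttled.configure do |config|
  # Setting the period to nil disables the cooldown manager completely,
  # so throttled queues are never temporarily excluded from polling.
  config.cooldown_period = nil
end
----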
@@ -290,7 +311,6 @@ dropped.
 
 This library aims to support and work with following Sidekiq versions:
 
-* Sidekiq 6.5.x
 * Sidekiq 7.0.x
 * Sidekiq 7.1.x
 * Sidekiq 7.2.x
data/lib/sidekiq/throttled/config.rb
CHANGED
@@ -19,9 +19,18 @@ module Sidekiq
       # @return [Integer]
       attr_reader :cooldown_threshold
 
+      # Specifies how we should return throttled jobs to the queue so they can be executed later.
+      # Expects a hash with keys that may include :with and :to
+      # For :with, options are `:enqueue` (put them on the end of the queue) and `:schedule` (schedule for later).
+      # For :to, the name of a sidekiq queue should be specified. If none is specified, jobs will by default be
+      # requeued to the same queue they were originally enqueued in.
+      # Default: {with: `:enqueue`}
+      #
+      # @return [Hash]
+      attr_reader :default_requeue_options
+
       def initialize
-
-        @cooldown_threshold = 1
+        reset!
       end
 
       # @!attribute [w] cooldown_period
@@ -39,6 +48,19 @@ module Sidekiq
 
         @cooldown_threshold = value
       end
+
+      # @!attribute [w] default_requeue_options
+      def default_requeue_options=(options)
+        requeue_with = options.delete(:with).intern || :enqueue
+
+        @default_requeue_options = options.merge({ with: requeue_with })
+      end
+
+      def reset!
+        @cooldown_period = 1.0
+        @cooldown_threshold = 100
+        @default_requeue_options = { with: :enqueue }
+      end
     end
   end
 end
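Since `Sidekiq::Throttled.configure` yields this same `Config` object, the new writer can change the gem-wide requeue behaviour. A minimal sketch exercising the `default_requeue_options=` setter introduced above:

[source,ruby]
----
# config/initializers/sidekiq.rb
Sidekiq::Throttled.configure do |config|
  # Throttled jobs of any class without their own :requeue option will be
  # scheduled for later instead of being pushed back onto the queue.
  config.default_requeue_options = { with: :schedule }
end
----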
data/lib/sidekiq/throttled/job.rb
CHANGED
@@ -13,8 +13,9 @@ module Sidekiq
    #     include Sidekiq::Job
    #     include Sidekiq::Throttled::Job
    #
-   #
-   #     sidekiq_throttle :threshold => { :limit => 123, :period => 1.hour }
+   #     sidekiq_options :queue => :my_queue
+   #     sidekiq_throttle :threshold => { :limit => 123, :period => 1.hour },
+   #                      :requeue => { :to => :other_queue, :with => :schedule }
    #
    #     def perform
    #       # ...
@@ -23,6 +24,8 @@ module Sidekiq
    #
    # @see ClassMethods
    module Job
+     VALID_VALUES_FOR_REQUEUE_WITH = %i[enqueue schedule].freeze
+
      # Extends worker class with {ClassMethods}.
      #
      # @note Using `included` hook with extending worker with {ClassMethods}
@@ -30,6 +33,7 @@ module Sidekiq
      #
      # @private
      def self.included(base)
+       base.sidekiq_class_attribute :sidekiq_throttled_requeue_options
        base.extend(ClassMethods)
      end
 
@@ -71,9 +75,29 @@ module Sidekiq
        #     })
        #   end
        #
+       # @example Allow max 123 MyJob jobs per hour; when jobs are throttled, schedule them for later in :other_queue
+       #
+       #   class MyJob
+       #     include Sidekiq::Job
+       #     include Sidekiq::Throttled::Job
+       #
+       #     sidekiq_throttle({
+       #       :threshold => { :limit => 123, :period => 1.hour },
+       #       :requeue => { :to => :other_queue, :with => :schedule }
+       #     })
+       #   end
+       #
+       # @param [Hash] requeue What to do with jobs that are throttled
        # @see Registry.add
        # @return [void]
        def sidekiq_throttle(**kwargs)
+         requeue_options = Throttled.config.default_requeue_options.merge(kwargs.delete(:requeue) || {})
+         unless VALID_VALUES_FOR_REQUEUE_WITH.include?(requeue_options[:with])
+           raise ArgumentError, "requeue: #{requeue_options[:with]} is not a valid value for :with"
+         end
+
+         self.sidekiq_throttled_requeue_options = requeue_options
+
          Registry.add(self, **kwargs)
        end
 
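Because the strategy (see the strategy.rb hunk further down) calls `:with` and `:to` with the job's arguments when they respond to `call`, the per-job `:requeue` option can also be dynamic. A hedged sketch — `ExportJob`, the `priority` argument, and the queue names are made up for illustration:

[source,ruby]
----
class ExportJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  # :to and :with may be Procs; the strategy calls them with the job's args.
  sidekiq_throttle(
    concurrency: { limit: 10 },
    requeue: {
      with: :schedule,
      to:   ->(priority, *) { priority == "high" ? "exports_high" : "exports_low" }
    }
  )

  def perform(priority, user_id)
    # ...
  end
end
----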
data/lib/sidekiq/throttled/patches/basic_fetch.rb
CHANGED
@@ -15,17 +15,6 @@ module Sidekiq
 
         private
 
-        # Pushes job back to the head of the queue, so that job won't be tried
-        # immediately after it was requeued (in most cases).
-        #
-        # @note This is triggered when job is throttled. So it is same operation
-        #   Sidekiq performs upon `Sidekiq::Worker.perform_async` call.
-        #
-        # @return [void]
-        def requeue_throttled(work)
-          redis { |conn| conn.lpush(work.queue, work.job) }
-        end
-
         # Returns list of queues to try to fetch jobs from.
         #
         # @note It may return an empty array.
data/lib/sidekiq/throttled/patches/super_fetch.rb
CHANGED
@@ -14,19 +14,6 @@ module Sidekiq
 
         private
 
-        # Calls SuperFetch UnitOfWork's requeue to remove the job from the
-        # temporary queue and push job back to the head of the queue, so that
-        # the job won't be tried immediately after it was requeued (in most cases).
-        #
-        # @note This is triggered when job is throttled.
-        #
-        # @return [void]
-        def requeue_throttled(work)
-          # SuperFetch UnitOfWork's requeue will remove it from the temporary
-          # queue and then requeue it, so no acknowledgement call is needed.
-          work.requeue
-        end
-
         # Returns list of non-paused queues to try to fetch jobs from.
         #
         # @note It may return an empty array.
data/lib/sidekiq/throttled/strategy/concurrency.rb
CHANGED
@@ -52,6 +52,16 @@ module Sidekiq
          Sidekiq.redis { |redis| 1 == SCRIPT.call(redis, keys: keys, argv: argv) }
        end
 
+       # @return [Float] How long, in seconds, before we'll next be able to take on jobs
+       def retry_in(_jid, *job_args)
+         job_limit = limit(job_args)
+         return 0.0 if !job_limit || count(*job_args) < job_limit
+
+         oldest_jid_with_score = Sidekiq.redis { |redis| redis.zrange(key(job_args), 0, 0, withscores: true) }.first
+         expiry_time = oldest_jid_with_score.last.to_f
+         expiry_time - Time.now.to_f
+       end
+
        # @return [Integer] Current count of jobs
        def count(*job_args)
          Sidekiq.redis { |conn| conn.zcard(key(job_args)) }.to_i
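Assuming the sorted set's scores are the expiry timestamps of the currently tracked JIDs (which is how the concurrency strategy appears to use them), the new `retry_in` is just the oldest entry's score minus the current time. A rough worked example with made-up numbers:

[source,ruby]
----
# Hypothetical: concurrency limit 10, the set already holds 10 jids,
# and the oldest tracked jid expires at t = 1_700_000_300.
now         = 1_700_000_180.0
expiry_time = 1_700_000_300.0   # score of the oldest tracked jid
retry_in    = expiry_time - now # => 120.0 seconds until a slot should free up
----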
data/lib/sidekiq/throttled/strategy/threshold.rb
CHANGED
@@ -69,6 +69,25 @@ module Sidekiq
          Sidekiq.redis { |redis| 1 == SCRIPT.call(redis, keys: keys, argv: argv) }
        end
 
+       # @return [Float] How long, in seconds, before we'll next be able to take on jobs
+       def retry_in(*job_args)
+         job_limit = limit(job_args)
+         return 0.0 if !job_limit || count(*job_args) < job_limit
+
+         job_period = period(job_args)
+         job_key = key(job_args)
+         time_since_oldest = Time.now.to_f - Sidekiq.redis { |redis| redis.lindex(job_key, -1) }.to_f
+         if time_since_oldest > job_period
+           # The oldest job on our list is from more than the throttling period ago,
+           # which means we have not hit the limit this period.
+           0.0
+         else
+           # If we can only have X jobs every Y minutes, then wait until Y minutes have elapsed
+           # since the oldest job on our list.
+           job_period - time_since_oldest
+         end
+       end
+
        # @return [Integer] Current count of jobs
        def count(*job_args)
          Sidekiq.redis { |conn| conn.llen(key(job_args)) }.to_i
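The threshold variant is easy to sanity-check with concrete numbers matching the README's 123-jobs-per-hour example (the timestamps below are made up):

[source,ruby]
----
# limit: 123, period: 1.hour (3600 s); the list already holds 123 timestamps.
job_period        = 3600.0
time_since_oldest = 1200.0  # the oldest tracked job ran 20 minutes ago
retry_in          = job_period - time_since_oldest # => 2400.0, capacity frees up in 40 minutes
----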
data/lib/sidekiq/throttled/strategy.rb
CHANGED
@@ -31,7 +31,7 @@ module Sidekiq
      # See keyword args of {Strategy::Threshold#initialize} for details.
      # @param [#call] key_suffix Dynamic key suffix generator.
      # @param [#call] observer Process called after throttled.
-     def initialize(name, concurrency: nil, threshold: nil, key_suffix: nil, observer: nil)
+     def initialize(name, concurrency: nil, threshold: nil, key_suffix: nil, observer: nil)
        @observer = observer
 
        @concurrency = StrategyCollection.new(concurrency,
@@ -44,9 +44,7 @@ module Sidekiq
                                            name: name,
                                            key_suffix: key_suffix)
 
-
-
-      raise ArgumentError, "Neither :concurrency nor :threshold given"
+      raise ArgumentError, "Neither :concurrency nor :threshold given" unless @concurrency.any? || @threshold.any?
      end
 
      # @return [Boolean] whenever strategy has dynamic config
@@ -74,18 +72,111 @@ module Sidekiq
        false
      end
 
+     # Return throttled job to be executed later. Implementation depends on the value of `with`:
+     # :enqueue means put the job back at the end of the queue immediately
+     # :schedule means schedule enqueueing the job for a later time when we expect to have capacity
+     #
+     # @param [#to_s, #call] with How to handle the throttled job
+     # @param [#to_s, #call] to Name of the queue to re-queue the job to.
+     #   If not specified, will use the job's original queue.
+     # @return [void]
+     def requeue_throttled(work, with:, to: nil) # rubocop:disable Metrics/MethodLength
+       # Resolve :with and :to arguments, calling them if they are Procs
+       job_args = JSON.parse(work.job)["args"]
+       requeue_with = with.respond_to?(:call) ? with.call(*job_args) : with
+       target_queue = calc_target_queue(work, to)
+
+       case requeue_with
+       when :enqueue
+         re_enqueue_throttled(work, target_queue)
+       when :schedule
+         # Find out when we will next be able to execute this job, and reschedule for then.
+         reschedule_throttled(work, requeue_to: target_queue)
+       else
+         raise "unrecognized :with option #{with}"
+       end
+     end
+
      # Marks job as being processed.
      # @return [void]
      def finalize!(jid, *job_args)
        @concurrency&.finalize!(jid, *job_args)
      end
 
-     # Resets count of jobs of all
+     # Resets count of jobs of all available strategies
      # @return [void]
      def reset!
        @concurrency&.reset!
        @threshold&.reset!
      end
+
+     private
+
+     def calc_target_queue(work, to) # rubocop:disable Metrics/MethodLength
+       target = case to
+                when Proc, Method
+                  to.call(*JSON.parse(work.job)["args"])
+                when NilClass
+                  work.queue
+                when String, Symbol
+                  to.to_s
+                else
+                  raise ArgumentError, "Invalid argument for `to`"
+                end
+
+       target = work.queue if target.nil? || target.empty?
+
+       target.start_with?("queue:") ? target : "queue:#{target}"
+     end
+
+     # Push the job back to the head of the queue.
+     def re_enqueue_throttled(work, requeue_to)
+       case work.class.name
+       when "Sidekiq::Pro::SuperFetch::UnitOfWork"
+         # Calls SuperFetch UnitOfWork's requeue to remove the job from the
+         # temporary queue and push job back to the head of the target queue, so that
+         # the job won't be tried immediately after it was requeued (in most cases).
+         work.queue = requeue_to if requeue_to
+         work.requeue
+       else
+         # This is the same operation Sidekiq performs upon `Sidekiq::Worker.perform_async` call.
+         Sidekiq.redis { |conn| conn.lpush(requeue_to, work.job) }
+       end
+     end
+
+     def reschedule_throttled(work, requeue_to:)
+       message = JSON.parse(work.job)
+       job_class = message.fetch("wrapped") { message.fetch("class") { return false } }
+       job_args = message["args"]
+
+       # Re-enqueue the job to the target queue at another time as a NEW unit of work
+       # AND THEN mark this work as done, so SuperFetch doesn't think this instance is orphaned
+       # Technically, the job could be processed twice if the process dies between the two lines,
+       # but your job should be idempotent anyway, right?
+       # The job running twice was already a risk with SuperFetch anyway and this doesn't really increase that risk.
+       Sidekiq::Client.enqueue_to_in(requeue_to, retry_in(work), Object.const_get(job_class), *job_args)
+       work.acknowledge
+     end
+
+     def retry_in(work)
+       message = JSON.parse(work.job)
+       jid = message.fetch("jid") { return false }
+       job_args = message["args"]
+
+       # Ask both concurrency and threshold, if relevant, how long minimum until we can retry.
+       # If we get two answers, take the longer one.
+       intervals = [@concurrency&.retry_in(jid, *job_args), @threshold&.retry_in(*job_args)].compact
+
+       raise "Cannot compute a valid retry interval" if intervals.empty?
+
+       interval = intervals.max
+
+       # Add a random amount of jitter, proportional to the length of the minimum retry time.
+       # This helps spread out jobs more evenly and avoid clumps of jobs on the queue.
+       interval += rand(interval / 5) if interval > 10
+
+       interval
+     end
    end
  end
 end
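The jitter added at the end of `retry_in` scales with the computed interval and is skipped for short waits; with made-up values:

[source,ruby]
----
interval = 60.0
interval += rand(interval / 5) if interval > 10 # adds rand(12.0), i.e. 0.0 up to (not including) 12.0 seconds
# interval now falls in [60.0, 72.0), spreading rescheduled jobs apart

interval = 8.0
interval += rand(interval / 5) if interval > 10 # condition is false, no jitter added
----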
data/lib/sidekiq/throttled/strategy_collection.rb
CHANGED
@@ -41,6 +41,11 @@ module Sidekiq
        any? { |s| s.throttled?(...) }
      end
 
+     # @return [Float] How long, in seconds, before we'll next be able to take on jobs
+     def retry_in(*args)
+       max { |s| s.retry_in(*args) }
+     end
+
      # Marks job as being processed.
      # @return [void]
      def finalize!(...)
data/lib/sidekiq/throttled.rb
CHANGED
@@ -54,6 +54,11 @@ module Sidekiq
      # @return [Cooldown, nil]
      attr_reader :cooldown
 
+     # @api internal
+     #
+     # @return [Config, nil]
+     attr_reader :config
+
      # @example
      #   Sidekiq::Throttled.configure do |config|
      #     config.cooldown_period = nil # Disable queues cooldown manager
@@ -88,15 +93,16 @@ module Sidekiq
        false
      end
 
-     #
-
-
+     # Return throttled job to be executed later, delegating the details of how to do that
+     # to the Strategy for that job.
+     #
+     # @return [void]
+     def requeue_throttled(work)
+       message = JSON.parse(work.job)
+       job_class = Object.const_get(message.fetch("wrapped") { message.fetch("class") { return false } })
 
-
-
-        chain.remove(Sidekiq::Throttled::Middlewares::Server)
-        chain.add(Sidekiq::Throttled::Middlewares::Server)
-      end
+       Registry.get job_class do |strategy|
+         strategy.requeue_throttled(work, **job_class.sidekiq_throttled_requeue_options)
       end
     end
   end
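Tying the pieces together: `sidekiq_throttle` stores the merged requeue options on the job class, and the dispatcher above forwards them to the strategy. Using the class and option names from the examples in this diff, inspecting the result would look roughly like this (`config` is marked `@api internal`):

[source,ruby]
----
MyJob.sidekiq_throttled_requeue_options
# => { with: :schedule, to: :other_queue }   per-job :requeue merged over the global default

Sidekiq::Throttled.config.default_requeue_options
# => { with: :enqueue }                      unless overridden via Sidekiq::Throttled.configure
----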
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: sidekiq-throttled
 version: !ruby/object:Gem::Version
-  version: 1.
+  version: 1.5.0
 platform: ruby
 authors:
 - Alexey Zapparov
 autorequire:
-bindir:
+bindir: bin
 cert_chain: []
-date: 2024-
+date: 2024-11-17 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: concurrent-ruby
@@ -90,9 +90,9 @@ licenses:
 - MIT
 metadata:
   homepage_uri: https://github.com/ixti/sidekiq-throttled
-  source_code_uri: https://github.com/ixti/sidekiq-throttled/tree/v1.
+  source_code_uri: https://github.com/ixti/sidekiq-throttled/tree/v1.5.0
   bug_tracker_uri: https://github.com/ixti/sidekiq-throttled/issues
-  changelog_uri: https://github.com/ixti/sidekiq-throttled/blob/v1.
+  changelog_uri: https://github.com/ixti/sidekiq-throttled/blob/v1.5.0/CHANGES.md
   rubygems_mfa_required: 'true'
 post_install_message:
 rdoc_options: []
@@ -109,7 +109,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.4.22
 signing_key:
 specification_version: 4
 summary: Concurrency and rate-limit throttling for Sidekiq