sidekiq-throttled 1.3.0 → 1.5.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 433460cfaea479faf45edc665c6f8d7d025b3e35ccdf18a32061a0b3d3c42e68
- data.tar.gz: 4e36f4c938d7145646e04f1de6d483bdec5a4c18254c3153be3556285b779fbf
+ metadata.gz: 9d5819912372e26634558752740bc154c4d22899b42925740b86152433744740
+ data.tar.gz: dadcea04012b95e0349728c573a3253d86bae7920ee5844b1df9fb80933c2267
  SHA512:
- metadata.gz: 3ba640f6b15fa32eb21eccda8a319357439b7367fea6c0e4d4125e0e2cd5e91fbb822768d2171c57ac0cfbe0ba640b1d21960c6e2ea317997869c8f06187aec9
- data.tar.gz: bd28f31c6222b3f378e1e7a2d081cc9f05c4feb13b7be8b1b54ee91976a66b2263884837f3149dddcc6b047129a0311b65135433511b0dc3ed29d47ed88346ea
+ metadata.gz: cf81c1a319d6f4af5c5a06f5df95dd99ad44a405f152cfda5b4c6123d4692b93d23137ab12422d6b83c82090dc422b15b441f34ed43dc1868933d54354c1326d
+ data.tar.gz: f72803ff9a84bb3cc729869dc1105dcc9f9581c7c4c59eb2bb8a28594a7fe0df49e9b064dbae2c1f46020790637769196f98b5b38a0f77f2cb858f1b6d928679
data/README.adoc CHANGED
@@ -31,7 +31,6 @@ Or install it yourself as:
 
  $ gem install sidekiq-throttled
 
-
  == Usage
 
  Add somewhere in your app's bootstrap (e.g. `config/initializers/sidekiq.rb` if
@@ -81,6 +80,17 @@ end
  ----
 
 
+ === Web UI
+
+ To add a Throttled tab to your sidekiq web dashboard, require it during your
+ application initialization.
+
+ [source,ruby]
+ ----
+ require "sidekiq/throttled/web"
+ ----
+
+
  === Configuration
 
  [source,ruby]
@@ -89,16 +99,27 @@ Sidekiq::Throttled.configure do |config|
  # Period in seconds to exclude queue from polling in case it returned
  # {config.cooldown_threshold} amount of throttled jobs in a row. Set
  # this value to `nil` to disable cooldown manager completely.
- # Default: 2.0
- config.cooldown_period = 2.0
+ # Default: 1.0
+ config.cooldown_period = 1.0
 
  # Exclude queue from polling after it returned given amount of throttled
  # jobs in a row.
- # Default: 1 (cooldown after first throttled job)
- config.cooldown_threshold = 1
+ # Default: 100 (cooldown after hundredth throttled job in a row)
+ config.cooldown_threshold = 100
  end
  ----
 
+ [WARNING]
+ .Cooldown Settings
+ ====
+ If a queue contains a thousand jobs in a row that will be throttled,
+ the cooldown will kick in 10 times in a row, meaning it will take 10 seconds
+ before all those jobs are put back at the end of the queue and you actually
+ start processing other jobs.
+
+ You may want to adjust the cooldown_threshold and cooldown_period,
+ keeping in mind that this will also impact the load on your Redis server.
+ ====
 
  ==== Middleware(s)
 
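For reference, the pieces above fit together in a single boot file. A minimal sketch, assuming a Rails-style `config/initializers/sidekiq.rb`; the cooldown values are illustrative, not the library defaults:

[source,ruby]
----
# config/initializers/sidekiq.rb -- illustrative values only
require "sidekiq/throttled"
require "sidekiq/throttled/web" # adds the Throttled tab to the Sidekiq web UI

Sidekiq::Throttled.configure do |config|
  # Back off from a noisy queue for longer, but only after more throttled jobs in a row.
  config.cooldown_period    = 5.0
  config.cooldown_threshold = 250
end
----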
@@ -290,7 +311,6 @@ dropped.
 
  This library aims to support and work with following Sidekiq versions:
 
- * Sidekiq 6.5.x
  * Sidekiq 7.0.x
  * Sidekiq 7.1.x
  * Sidekiq 7.2.x
@@ -310,9 +330,9 @@ And the following Sidekiq Pro versions:
 
  === Sidekiq-Pro
 
- If you're working on Sidekiq-Pro support make sure to copy `.rspec-sidekiq-pro`
- to `.rspec-local` and that you have Sidekiq-Pro license in the global config,
- or in the `BUNDLE_GEMS\__CONTRIBSYS__COM` env variable.
+ If you're working on Sidekiq-Pro support make sure that you have Sidekiq-Pro
+ license set either in the global config, or in `BUNDLE_GEMS\__CONTRIBSYS__COM`
+ environment variable.
 
  == Contributing
 
@@ -19,9 +19,18 @@ module Sidekiq
  # @return [Integer]
  attr_reader :cooldown_threshold
 
+ # Specifies how we should return throttled jobs to the queue so they can be executed later.
+ # Expects a hash with keys that may include :with and :to
+ # For :with, options are `:enqueue` (put them on the end of the queue) and `:schedule` (schedule for later).
+ # For :to, the name of a sidekiq queue should be specified. If none is specified, jobs will by default be
+ # requeued to the same queue they were originally enqueued in.
+ # Default: {with: `:enqueue`}
+ #
+ # @return [Hash]
+ attr_reader :default_requeue_options
+
  def initialize
- @cooldown_period = 2.0
- @cooldown_threshold = 1
+ reset!
  end
 
  # @!attribute [w] cooldown_period
@@ -39,6 +48,19 @@ module Sidekiq
 
  @cooldown_threshold = value
  end
+
+ # @!attribute [w] default_requeue_options
+ def default_requeue_options=(options)
+ requeue_with = options.delete(:with).intern || :enqueue
+
+ @default_requeue_options = options.merge({ with: requeue_with })
+ end
+
+ def reset!
+ @cooldown_period = 1.0
+ @cooldown_threshold = 100
+ @default_requeue_options = { with: :enqueue }
+ end
  end
  end
  end
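The new `default_requeue_options` writer can be set from the same `Sidekiq::Throttled.configure` block shown in the README. A minimal sketch; the `:low_priority` queue name is illustrative:

[source,ruby]
----
Sidekiq::Throttled.configure do |config|
  # Schedule throttled jobs for later instead of pushing them back onto the
  # head of their queue, and send them to a dedicated queue while doing so.
  config.default_requeue_options = { with: :schedule, to: :low_priority }
end
----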
@@ -13,8 +13,9 @@ module Sidekiq
  # include Sidekiq::Job
  # include Sidekiq::Throttled::Job
  #
- # sidekiq_options :queue => :my_queue
- # sidekiq_throttle :threshold => { :limit => 123, :period => 1.hour }
+ # sidekiq_options :queue => :my_queue
+ # sidekiq_throttle :threshold => { :limit => 123, :period => 1.hour },
+ #   :requeue => { :to => :other_queue, :with => :schedule }
  #
  # def perform
  # # ...
@@ -23,6 +24,8 @@ module Sidekiq
  #
  # @see ClassMethods
  module Job
+ VALID_VALUES_FOR_REQUEUE_WITH = %i[enqueue schedule].freeze
+
  # Extends worker class with {ClassMethods}.
  #
  # @note Using `included` hook with extending worker with {ClassMethods}
@@ -30,6 +33,7 @@ module Sidekiq
  #
  # @private
  def self.included(base)
+ base.sidekiq_class_attribute :sidekiq_throttled_requeue_options
  base.extend(ClassMethods)
  end
 
@@ -71,9 +75,29 @@ module Sidekiq
  # })
  # end
  #
+ # @example Allow max 123 MyJob jobs per hour; when jobs are throttled, schedule them for later in :other_queue
+ #
+ # class MyJob
+ # include Sidekiq::Job
+ # include Sidekiq::Throttled::Job
+ #
+ # sidekiq_throttle({
+ # :threshold => { :limit => 123, :period => 1.hour },
+ # :requeue => { :to => :other_queue, :with => :schedule }
+ # })
+ # end
+ #
+ # @param [Hash] requeue What to do with jobs that are throttled
  # @see Registry.add
  # @return [void]
  def sidekiq_throttle(**kwargs)
+ requeue_options = Throttled.config.default_requeue_options.merge(kwargs.delete(:requeue) || {})
+ unless VALID_VALUES_FOR_REQUEUE_WITH.include?(requeue_options[:with])
+ raise ArgumentError, "requeue: #{requeue_options[:with]} is not a valid value for :with"
+ end
+
+ self.sidekiq_throttled_requeue_options = requeue_options
+
  Registry.add(self, **kwargs)
  end
 
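A small illustration of the new validation in `sidekiq_throttle`: a `:with` value outside `VALID_VALUES_FOR_REQUEUE_WITH` is rejected at class-definition time. The `BadJob` class and its limits are hypothetical:

[source,ruby]
----
class BadJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  # Only :enqueue and :schedule are accepted, so this raises:
  #   ArgumentError: requeue: discard is not a valid value for :with
  sidekiq_throttle(
    threshold: { limit: 10, period: 60 },
    requeue:   { with: :discard }
  )
end
----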
@@ -0,0 +1,32 @@
+ # frozen_string_literal: true
+
+ module Sidekiq
+ module Throttled
+ class Message
+ def initialize(item)
+ @item = item.is_a?(Hash) ? item : parse(item)
+ end
+
+ def job_class
+ @item.fetch("wrapped") { @item["class"] }
+ end
+
+ def job_args
+ @item.key?("wrapped") ? @item.dig("args", 0, "arguments") : @item["args"]
+ end
+
+ def job_id
+ @item["jid"]
+ end
+
+ private
+
+ def parse(item)
+ item = Sidekiq.load_json(item)
+ item.is_a?(Hash) ? item : {}
+ rescue JSON::ParserError
+ {}
+ end
+ end
+ end
+ end
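A sketch of how the new `Message` wrapper normalizes payloads; the payloads below are illustrative, covering both a plain Sidekiq job and an ActiveJob-wrapped one:

[source,ruby]
----
require "sidekiq/throttled"

# Plain Sidekiq payload (a JSON string or a Hash both work):
plain = Sidekiq::Throttled::Message.new('{"class":"MyJob","jid":"abc123","args":[1,2]}')
plain.job_class # => "MyJob"
plain.job_id    # => "abc123"
plain.job_args  # => [1, 2]

# ActiveJob-wrapped payload: the real job class sits under "wrapped" and its
# arguments under args[0]["arguments"]:
wrapped = Sidekiq::Throttled::Message.new(
  "class"   => "ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper",
  "wrapped" => "MyJob",
  "jid"     => "def456",
  "args"    => [{ "job_class" => "MyJob", "arguments" => [1, 2] }]
)
wrapped.job_class # => "MyJob"
wrapped.job_args  # => [1, 2]
----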
@@ -1,6 +1,7 @@
  # frozen_string_literal: true
 
  # internal
+ require_relative "../message"
  require_relative "../registry"
 
  module Sidekiq
@@ -13,12 +14,11 @@ module Sidekiq
  def call(_worker, msg, _queue)
  yield
  ensure
- job = msg.fetch("wrapped") { msg["class"] }
- jid = msg["jid"]
+ message = Message.new(msg)
 
- if job && jid
- Registry.get job do |strategy|
- strategy.finalize!(jid, *msg["args"])
+ if message.job_class && message.job_id
+ Registry.get(message.job_class) do |strategy|
+ strategy.finalize!(message.job_id, *message.job_args)
  end
  end
  end
@@ -15,17 +15,6 @@ module Sidekiq
 
  private
 
- # Pushes job back to the head of the queue, so that job won't be tried
- # immediately after it was requeued (in most cases).
- #
- # @note This is triggered when job is throttled. So it is same operation
- # Sidekiq performs upon `Sidekiq::Worker.perform_async` call.
- #
- # @return [void]
- def requeue_throttled(work)
- redis { |conn| conn.lpush(work.queue, work.job) }
- end
-
  # Returns list of queues to try to fetch jobs from.
  #
  # @note It may return an empty array.
@@ -14,19 +14,6 @@ module Sidekiq
 
  private
 
- # Calls SuperFetch UnitOfWork's requeue to remove the job from the
- # temporary queue and push job back to the head of the queue, so that
- # the job won't be tried immediately after it was requeued (in most cases).
- #
- # @note This is triggered when job is throttled.
- #
- # @return [void]
- def requeue_throttled(work)
- # SuperFetch UnitOfWork's requeue will remove it from the temporary
- # queue and then requeue it, so no acknowledgement call is needed.
- work.requeue
- end
-
  # Returns list of non-paused queues to try to fetch jobs from.
  #
  # @note It may return an empty array.
@@ -12,7 +12,7 @@ module Sidekiq
 
  if work && Throttled.throttled?(work.job)
  Throttled.cooldown&.notify_throttled(work.queue)
- requeue_throttled(work)
+ Throttled.requeue_throttled(work)
  return nil
  end
 
@@ -52,6 +52,16 @@ module Sidekiq
  Sidekiq.redis { |redis| 1 == SCRIPT.call(redis, keys: keys, argv: argv) }
  end
 
+ # @return [Float] How long, in seconds, before we'll next be able to take on jobs
+ def retry_in(_jid, *job_args)
+ job_limit = limit(job_args)
+ return 0.0 if !job_limit || count(*job_args) < job_limit
+
+ oldest_jid_with_score = Sidekiq.redis { |redis| redis.zrange(key(job_args), 0, 0, withscores: true) }.first
+ expiry_time = oldest_jid_with_score.last.to_f
+ expiry_time - Time.now.to_f
+ end
+
  # @return [Integer] Current count of jobs
  def count(*job_args)
  Sidekiq.redis { |conn| conn.zcard(key(job_args)) }.to_i
@@ -69,6 +69,25 @@ module Sidekiq
  Sidekiq.redis { |redis| 1 == SCRIPT.call(redis, keys: keys, argv: argv) }
  end
 
+ # @return [Float] How long, in seconds, before we'll next be able to take on jobs
+ def retry_in(*job_args)
+ job_limit = limit(job_args)
+ return 0.0 if !job_limit || count(*job_args) < job_limit
+
+ job_period = period(job_args)
+ job_key = key(job_args)
+ time_since_oldest = Time.now.to_f - Sidekiq.redis { |redis| redis.lindex(job_key, -1) }.to_f
+ if time_since_oldest > job_period
+ # The oldest job on our list is from more than the throttling period ago,
+ # which means we have not hit the limit this period.
+ 0.0
+ else
+ # If we can only have X jobs every Y minutes, then wait until Y minutes have elapsed
+ # since the oldest job on our list.
+ job_period - time_since_oldest
+ end
+ end
+
  # @return [Integer] Current count of jobs
  def count(*job_args)
  Sidekiq.redis { |conn| conn.llen(key(job_args)) }.to_i
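To make the scheduling math concrete, a worked example with illustrative numbers: with a threshold limit of 123 jobs per hour, the limit currently hit, and the oldest tracked job pushed 10 minutes ago, the next slot opens in 50 minutes.

[source,ruby]
----
# Illustrative arithmetic only (mirrors the else branch above)
job_period        = 3600.0                          # 1.hour, in seconds
time_since_oldest = 600.0                           # oldest tracked job ran 10 minutes ago
retry_in          = job_period - time_since_oldest  # => 3000.0 seconds, i.e. 50 minutes
----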
@@ -31,7 +31,7 @@ module Sidekiq
  # See keyword args of {Strategy::Threshold#initialize} for details.
  # @param [#call] key_suffix Dynamic key suffix generator.
  # @param [#call] observer Process called after throttled.
- def initialize(name, concurrency: nil, threshold: nil, key_suffix: nil, observer: nil) # rubocop:disable Metrics/MethodLength
+ def initialize(name, concurrency: nil, threshold: nil, key_suffix: nil, observer: nil)
  @observer = observer
 
  @concurrency = StrategyCollection.new(concurrency,
@@ -44,9 +44,7 @@ module Sidekiq
  name: name,
  key_suffix: key_suffix)
 
- return if @concurrency.any? || @threshold.any?
-
- raise ArgumentError, "Neither :concurrency nor :threshold given"
+ raise ArgumentError, "Neither :concurrency nor :threshold given" unless @concurrency.any? || @threshold.any?
  end
 
  # @return [Boolean] whenever strategy has dynamic config
@@ -74,18 +72,111 @@ module Sidekiq
  false
  end
 
+ # Return throttled job to be executed later. Implementation depends on the value of `with`:
+ # :enqueue means put the job back at the end of the queue immediately
+ # :schedule means schedule enqueueing the job for a later time when we expect to have capacity
+ #
+ # @param [#to_s, #call] with How to handle the throttled job
+ # @param [#to_s, #call] to Name of the queue to re-queue the job to.
+ # If not specified, will use the job's original queue.
+ # @return [void]
+ def requeue_throttled(work, with:, to: nil) # rubocop:disable Metrics/MethodLength
+ # Resolve :with and :to arguments, calling them if they are Procs
+ job_args = JSON.parse(work.job)["args"]
+ requeue_with = with.respond_to?(:call) ? with.call(*job_args) : with
+ target_queue = calc_target_queue(work, to)
+
+ case requeue_with
+ when :enqueue
+ re_enqueue_throttled(work, target_queue)
+ when :schedule
+ # Find out when we will next be able to execute this job, and reschedule for then.
+ reschedule_throttled(work, requeue_to: target_queue)
+ else
+ raise "unrecognized :with option #{with}"
+ end
+ end
+
  # Marks job as being processed.
  # @return [void]
  def finalize!(jid, *job_args)
  @concurrency&.finalize!(jid, *job_args)
  end
 
- # Resets count of jobs of all avaliable strategies
+ # Resets count of jobs of all available strategies
  # @return [void]
  def reset!
  @concurrency&.reset!
  @threshold&.reset!
  end
+
+ private
+
+ def calc_target_queue(work, to) # rubocop:disable Metrics/MethodLength
+ target = case to
+ when Proc, Method
+ to.call(*JSON.parse(work.job)["args"])
+ when NilClass
+ work.queue
+ when String, Symbol
+ to.to_s
+ else
+ raise ArgumentError, "Invalid argument for `to`"
+ end
+
+ target = work.queue if target.nil? || target.empty?
+
+ target.start_with?("queue:") ? target : "queue:#{target}"
+ end
+
+ # Push the job back to the head of the queue.
+ def re_enqueue_throttled(work, requeue_to)
+ case work.class.name
+ when "Sidekiq::Pro::SuperFetch::UnitOfWork"
+ # Calls SuperFetch UnitOfWork's requeue to remove the job from the
+ # temporary queue and push job back to the head of the target queue, so that
+ # the job won't be tried immediately after it was requeued (in most cases).
+ work.queue = requeue_to if requeue_to
+ work.requeue
+ else
+ # This is the same operation Sidekiq performs upon `Sidekiq::Worker.perform_async` call.
+ Sidekiq.redis { |conn| conn.lpush(requeue_to, work.job) }
+ end
+ end
+
+ def reschedule_throttled(work, requeue_to:)
+ message = JSON.parse(work.job)
+ job_class = message.fetch("wrapped") { message.fetch("class") { return false } }
+ job_args = message["args"]
+
+ # Re-enqueue the job to the target queue at another time as a NEW unit of work
+ # AND THEN mark this work as done, so SuperFetch doesn't think this instance is orphaned
+ # Technically, the job could be processed twice if the process dies between the two lines,
+ # but your job should be idempotent anyway, right?
+ # The job running twice was already a risk with SuperFetch anyway and this doesn't really increase that risk.
+ Sidekiq::Client.enqueue_to_in(requeue_to, retry_in(work), Object.const_get(job_class), *job_args)
+ work.acknowledge
+ end
+
+ def retry_in(work)
+ message = JSON.parse(work.job)
+ jid = message.fetch("jid") { return false }
+ job_args = message["args"]
+
+ # Ask both concurrency and threshold, if relevant, how long minimum until we can retry.
+ # If we get two answers, take the longer one.
+ intervals = [@concurrency&.retry_in(jid, *job_args), @threshold&.retry_in(*job_args)].compact
+
+ raise "Cannot compute a valid retry interval" if intervals.empty?
+
+ interval = intervals.max
+
+ # Add a random amount of jitter, proportional to the length of the minimum retry time.
+ # This helps spread out jobs more evenly and avoid clumps of jobs on the queue.
+ interval += rand(interval / 5) if interval > 10
+
+ interval
+ end
  end
  end
  end
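Because `:with` may be a callable and `:to` may be a `Proc` (see `calc_target_queue`), requeue behaviour can be decided per job from its arguments. A minimal sketch, assuming a hypothetical `TenantJob` whose first argument is a tenant id:

[source,ruby]
----
class TenantJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  sidekiq_throttle(
    threshold: { limit: 10, period: 60 },
    requeue:   {
      with: :schedule,
      # Route throttled work to a per-tenant queue derived from the job args.
      to:   ->(tenant_id, *) { "tenant_#{tenant_id}" }
    }
  )

  def perform(tenant_id, payload)
    # ...
  end
end
----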
@@ -41,6 +41,11 @@ module Sidekiq
  any? { |s| s.throttled?(...) }
  end
 
+ # @return [Float] How long, in seconds, before we'll next be able to take on jobs
+ def retry_in(*args)
+ max { |s| s.retry_in(*args) }
+ end
+
  # Marks job as being processed.
  # @return [void]
  def finalize!(...)
@@ -3,6 +3,6 @@
  module Sidekiq
  module Throttled
  # Gem version
- VERSION = "1.3.0"
+ VERSION = "1.5.0"
  end
  end
@@ -5,6 +5,7 @@ require "sidekiq"
  require_relative "./throttled/config"
  require_relative "./throttled/cooldown"
  require_relative "./throttled/job"
+ require_relative "./throttled/message"
  require_relative "./throttled/middlewares/server"
  require_relative "./throttled/patches/basic_fetch"
  require_relative "./throttled/patches/super_fetch"
@@ -53,6 +54,11 @@ module Sidekiq
  # @return [Cooldown, nil]
  attr_reader :cooldown
 
+ # @api internal
+ #
+ # @return [Config, nil]
+ attr_reader :config
+
  # @example
  # Sidekiq::Throttled.configure do |config|
  # config.cooldown_period = nil # Disable queues cooldown manager
@@ -75,14 +81,11 @@ module Sidekiq
  # @param [String] message Job's JSON payload
  # @return [Boolean]
  def throttled?(message)
- message = Sidekiq.load_json(message)
- job = message.fetch("wrapped") { message["class"] }
- jid = message["jid"]
+ message = Message.new(message)
+ return false unless message.job_class && message.job_id
 
- return false unless job && jid
-
- Registry.get(job) do |strategy|
- return strategy.throttled?(jid, *message["args"])
+ Registry.get(message.job_class) do |strategy|
+ return strategy.throttled?(message.job_id, *message.job_args)
  end
 
  false
@@ -90,15 +93,16 @@ module Sidekiq
  false
  end
 
- # @deprecated Will be removed in 2.0.0
- def setup!
- warn "Sidekiq::Throttled.setup! was deprecated"
+ # Return throttled job to be executed later, delegating the details of how to do that
+ # to the Strategy for that job.
+ #
+ # @return [void]
+ def requeue_throttled(work)
+ message = JSON.parse(work.job)
+ job_class = Object.const_get(message.fetch("wrapped") { message.fetch("class") { return false } })
 
- Sidekiq.configure_server do |config|
- config.server_middleware do |chain|
- chain.remove(Sidekiq::Throttled::Middlewares::Server)
- chain.add(Sidekiq::Throttled::Middlewares::Server)
- end
+ Registry.get job_class do |strategy|
+ strategy.requeue_throttled(work, **job_class.sidekiq_throttled_requeue_options)
  end
  end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: sidekiq-throttled
  version: !ruby/object:Gem::Version
- version: 1.3.0
+ version: 1.5.0
  platform: ruby
  authors:
  - Alexey Zapparov
  autorequire:
- bindir: exe
+ bindir: bin
  cert_chain: []
- date: 2024-01-18 00:00:00.000000000 Z
+ date: 2024-11-17 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: concurrent-ruby
@@ -67,6 +67,7 @@ files:
  - lib/sidekiq/throttled/errors.rb
  - lib/sidekiq/throttled/expirable_set.rb
  - lib/sidekiq/throttled/job.rb
+ - lib/sidekiq/throttled/message.rb
  - lib/sidekiq/throttled/middlewares/server.rb
  - lib/sidekiq/throttled/patches/basic_fetch.rb
  - lib/sidekiq/throttled/patches/super_fetch.rb
@@ -89,9 +90,9 @@ licenses:
  - MIT
  metadata:
  homepage_uri: https://github.com/ixti/sidekiq-throttled
- source_code_uri: https://github.com/ixti/sidekiq-throttled/tree/v1.3.0
+ source_code_uri: https://github.com/ixti/sidekiq-throttled/tree/v1.5.0
  bug_tracker_uri: https://github.com/ixti/sidekiq-throttled/issues
- changelog_uri: https://github.com/ixti/sidekiq-throttled/blob/v1.3.0/CHANGES.md
+ changelog_uri: https://github.com/ixti/sidekiq-throttled/blob/v1.5.0/CHANGES.md
  rubygems_mfa_required: 'true'
  post_install_message:
  rdoc_options: []
@@ -108,7 +109,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubygems_version: 3.5.4
+ rubygems_version: 3.4.22
  signing_key:
  specification_version: 4
  summary: Concurrency and rate-limit throttling for Sidekiq