sidekiq-throttled 1.4.0 → 1.5.1

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 7746a966e9feb8f571468c3f9f790b618faacdb6a6b405183acce7afc9210fb0
4
- data.tar.gz: 7b4a320e9ed8bab632e7834f67015b6b377071c77ac0838cf2a2db662c34ec81
3
+ metadata.gz: 7a601fd050b147fd24ab88416d29273f0cca950c7e540c6303e1c0bb9c47f66d
4
+ data.tar.gz: 72ebc4a88c60c09faa9922c4f305fbe4374a90ebcafa62f149b83cbae6a356a6
5
5
  SHA512:
6
- metadata.gz: ed36a36f934b607f08c132db2296a02741d4fad2a4436f4c475bc66f6a0cda49ece9e95782092603e4a3146de4c9b4dc7eec95223d4ba15a3c92715d861a2c91
7
- data.tar.gz: c99428f6499ddcc70d89c9b73c8dcbcae275a6a81533645dcc7d8d0d0a7f04492b876afb4d651a934e5df2ad82996925b1f551ff16a9309beba99a146c0726f9
6
+ metadata.gz: a41505e78b80af968f4aa3ab5076298d8acb62ab4b79046bbb8ff3d7cdb83d456546d066c3c431033418d5754b300cb16c30443b4f66bf82c30b14c5043b3ab4
7
+ data.tar.gz: 95b3c4839f01711aa6ff3fca8f390ab49981f7e7cf1cf3cd9c289276f945943ada4a3ec9adf96413cf1435f58fe9ea8c08f2b31fc3133e4fbbfca646d0c677b0
data/README.adoc CHANGED
@@ -31,7 +31,6 @@ Or install it yourself as:
31
31
 
32
32
  $ gem install sidekiq-throttled
33
33
 
34
-
35
34
  == Usage
36
35
 
37
36
  Add somewhere in your app's bootstrap (e.g. `config/initializers/sidekiq.rb` if
@@ -81,6 +80,17 @@ end
81
80
  ----
82
81
 
83
82
 
83
+ === Web UI
84
+
85
+ To add a Throttled tab to your sidekiq web dashboard, require it during your
86
+ application initialization.
87
+
88
+ [source,ruby]
89
+ ----
90
+ require "sidekiq/throttled/web"
91
+ ----
92
+
93
+
84
94
  === Configuration
85
95
 
86
96
  [source,ruby]
@@ -89,16 +99,27 @@ Sidekiq::Throttled.configure do |config|
89
99
  # Period in seconds to exclude queue from polling in case it returned
90
100
  # {config.cooldown_threshold} amount of throttled jobs in a row. Set
91
101
  # this value to `nil` to disable cooldown manager completely.
92
- # Default: 2.0
93
- config.cooldown_period = 2.0
102
+ # Default: 1.0
103
+ config.cooldown_period = 1.0
94
104
 
95
105
  # Exclude queue from polling after it returned given amount of throttled
96
106
  # jobs in a row.
97
- # Default: 1 (cooldown after first throttled job)
98
- config.cooldown_threshold = 1
107
+ # Default: 100 (cooldown after hundredth throttled job in a row)
108
+ config.cooldown_threshold = 100
99
109
  end
100
110
  ----
101
111
 
112
+ [WARNING]
113
+ .Cooldown Settings
114
+ ====
115
+ If a queue contains a thousand jobs in a row that will be throttled,
116
+ the cooldown will kick-in 10 times in a row, meaning it will take 10 seconds
117
+ before all those jobs are put back at the end of the queue and you actually
118
+ start processing other jobs.
119
+
120
+ You may want to adjust the cooldown_threshold and cooldown_period,
121
+ keeping in mind that this will also impact the load on your Redis server.
122
+ ====
102
123
 
103
124
  ==== Middleware(s)
104
125
 
@@ -290,7 +311,6 @@ dropped.
290
311
 
291
312
  This library aims to support and work with following Sidekiq versions:
292
313
 
293
- * Sidekiq 6.5.x
294
314
  * Sidekiq 7.0.x
295
315
  * Sidekiq 7.1.x
296
316
  * Sidekiq 7.2.x
@@ -19,9 +19,18 @@ module Sidekiq
19
19
  # @return [Integer]
20
20
  attr_reader :cooldown_threshold
21
21
 
22
+ # Specifies how we should return throttled jobs to the queue so they can be executed later.
23
+ # Expects a hash with keys that may include :with and :to
24
+ # For :with, options are `:enqueue` (put them on the end of the queue) and `:schedule` (schedule for later).
25
+ # For :to, the name of a sidekiq queue should be specified. If none is specified, jobs will by default be
26
+ # requeued to the same queue they were originally enqueued in.
27
+ # Default: {with: `:enqueue`}
28
+ #
29
+ # @return [Hash]
30
+ attr_reader :default_requeue_options
31
+
22
32
  def initialize
23
- @cooldown_period = 2.0
24
- @cooldown_threshold = 1
33
+ reset!
25
34
  end
26
35
 
27
36
  # @!attribute [w] cooldown_period
@@ -39,6 +48,19 @@ module Sidekiq
39
48
 
40
49
  @cooldown_threshold = value
41
50
  end
51
+
52
+ # @!attribute [w] default_requeue_options
53
+ def default_requeue_options=(options)
54
+ requeue_with = options.delete(:with).intern || :enqueue
55
+
56
+ @default_requeue_options = options.merge({ with: requeue_with })
57
+ end
58
+
59
+ def reset!
60
+ @cooldown_period = 1.0
61
+ @cooldown_threshold = 100
62
+ @default_requeue_options = { with: :enqueue }
63
+ end
42
64
  end
43
65
  end
44
66
  end
@@ -13,8 +13,9 @@ module Sidekiq
13
13
  # include Sidekiq::Job
14
14
  # include Sidekiq::Throttled::Job
15
15
  #
16
- # sidekiq_options :queue => :my_queue
17
- # sidekiq_throttle :threshold => { :limit => 123, :period => 1.hour }
16
+ # sidekiq_options :queue => :my_queue
17
+ # sidekiq_throttle :threshold => { :limit => 123, :period => 1.hour },
18
+ # :requeue => { :to => :other_queue, :with => :schedule }
18
19
  #
19
20
  # def perform
20
21
  # # ...
@@ -30,6 +31,7 @@ module Sidekiq
30
31
  #
31
32
  # @private
32
33
  def self.included(base)
34
+ base.sidekiq_class_attribute :sidekiq_throttled_requeue_options
33
35
  base.extend(ClassMethods)
34
36
  end
35
37
 
@@ -71,6 +73,19 @@ module Sidekiq
71
73
  # })
72
74
  # end
73
75
  #
76
+ # @example Allow max 123 MyJob jobs per hour; when jobs are throttled, schedule them for later in :other_queue
77
+ #
78
+ # class MyJob
79
+ # include Sidekiq::Job
80
+ # include Sidekiq::Throttled::Job
81
+ #
82
+ # sidekiq_throttle({
83
+ # :threshold => { :limit => 123, :period => 1.hour },
84
+ # :requeue => { :to => :other_queue, :with => :schedule }
85
+ # })
86
+ # end
87
+ #
88
+ # @param [Hash] requeue What to do with jobs that are throttled
74
89
  # @see Registry.add
75
90
  # @return [void]
76
91
  def sidekiq_throttle(**kwargs)
@@ -15,17 +15,6 @@ module Sidekiq
15
15
 
16
16
  private
17
17
 
18
- # Pushes job back to the head of the queue, so that job won't be tried
19
- # immediately after it was requeued (in most cases).
20
- #
21
- # @note This is triggered when job is throttled. So it is same operation
22
- # Sidekiq performs upon `Sidekiq::Worker.perform_async` call.
23
- #
24
- # @return [void]
25
- def requeue_throttled(work)
26
- redis { |conn| conn.lpush(work.queue, work.job) }
27
- end
28
-
29
18
  # Returns list of queues to try to fetch jobs from.
30
19
  #
31
20
  # @note It may return an empty array.
@@ -14,19 +14,6 @@ module Sidekiq
14
14
 
15
15
  private
16
16
 
17
- # Calls SuperFetch UnitOfWork's requeue to remove the job from the
18
- # temporary queue and push job back to the head of the queue, so that
19
- # the job won't be tried immediately after it was requeued (in most cases).
20
- #
21
- # @note This is triggered when job is throttled.
22
- #
23
- # @return [void]
24
- def requeue_throttled(work)
25
- # SuperFetch UnitOfWork's requeue will remove it from the temporary
26
- # queue and then requeue it, so no acknowledgement call is needed.
27
- work.requeue
28
- end
29
-
30
17
  # Returns list of non-paused queues to try to fetch jobs from.
31
18
  #
32
19
  # @note It may return an empty array.
@@ -12,7 +12,7 @@ module Sidekiq
12
12
 
13
13
  if work && Throttled.throttled?(work.job)
14
14
  Throttled.cooldown&.notify_throttled(work.queue)
15
- requeue_throttled(work)
15
+ Throttled.requeue_throttled(work)
16
16
  return nil
17
17
  end
18
18
 
@@ -52,6 +52,16 @@ module Sidekiq
52
52
  Sidekiq.redis { |redis| 1 == SCRIPT.call(redis, keys: keys, argv: argv) }
53
53
  end
54
54
 
55
+ # @return [Float] How long, in seconds, before we'll next be able to take on jobs
56
+ def retry_in(_jid, *job_args)
57
+ job_limit = limit(job_args)
58
+ return 0.0 if !job_limit || count(*job_args) < job_limit
59
+
60
+ oldest_jid_with_score = Sidekiq.redis { |redis| redis.zrange(key(job_args), 0, 0, withscores: true) }.first
61
+ expiry_time = oldest_jid_with_score.last.to_f
62
+ expiry_time - Time.now.to_f
63
+ end
64
+
55
65
  # @return [Integer] Current count of jobs
56
66
  def count(*job_args)
57
67
  Sidekiq.redis { |conn| conn.zcard(key(job_args)) }.to_i
@@ -69,6 +69,25 @@ module Sidekiq
69
69
  Sidekiq.redis { |redis| 1 == SCRIPT.call(redis, keys: keys, argv: argv) }
70
70
  end
71
71
 
72
+ # @return [Float] How long, in seconds, before we'll next be able to take on jobs
73
+ def retry_in(*job_args)
74
+ job_limit = limit(job_args)
75
+ return 0.0 if !job_limit || count(*job_args) < job_limit
76
+
77
+ job_period = period(job_args)
78
+ job_key = key(job_args)
79
+ time_since_oldest = Time.now.to_f - Sidekiq.redis { |redis| redis.lindex(job_key, -1) }.to_f
80
+ if time_since_oldest > job_period
81
+ # The oldest job on our list is from more than the throttling period ago,
82
+ # which means we have not hit the limit this period.
83
+ 0.0
84
+ else
85
+ # If we can only have X jobs every Y minutes, then wait until Y minutes have elapsed
86
+ # since the oldest job on our list.
87
+ job_period - time_since_oldest
88
+ end
89
+ end
90
+
72
91
  # @return [Integer] Current count of jobs
73
92
  def count(*job_args)
74
93
  Sidekiq.redis { |conn| conn.llen(key(job_args)) }.to_i
@@ -11,7 +11,11 @@ module Sidekiq
11
11
  # Meta-strategy that couples {Concurrency} and {Threshold} strategies.
12
12
  #
13
13
  # @private
14
- class Strategy
14
+ class Strategy # rubocop:disable Metrics/ClassLength
15
+ # :enqueue means put the job back at the end of the queue immediately
16
+ # :schedule means schedule enqueueing the job for a later time when we expect to have capacity
17
+ VALID_VALUES_FOR_REQUEUE_WITH = %i[enqueue schedule].freeze
18
+
15
19
  # @!attribute [r] concurrency
16
20
  # @return [Strategy::Concurrency, nil]
17
21
  attr_reader :concurrency
@@ -24,6 +28,10 @@ module Sidekiq
24
28
  # @return [Proc, nil]
25
29
  attr_reader :observer
26
30
 
31
+ # @!attribute [r] requeue_options
32
+ # @return [Hash, nil]
33
+ attr_reader :requeue_options
34
+
27
35
  # @param [#to_s] name
28
36
  # @param [Hash] concurrency Concurrency options.
29
37
  # See keyword args of {Strategy::Concurrency#initialize} for details.
@@ -31,7 +39,8 @@ module Sidekiq
31
39
  # See keyword args of {Strategy::Threshold#initialize} for details.
32
40
  # @param [#call] key_suffix Dynamic key suffix generator.
33
41
  # @param [#call] observer Process called after throttled.
34
- def initialize(name, concurrency: nil, threshold: nil, key_suffix: nil, observer: nil) # rubocop:disable Metrics/MethodLength
42
+ # @param [#call] requeue What to do with jobs that are throttled.
43
+ def initialize(name, concurrency: nil, threshold: nil, key_suffix: nil, observer: nil, requeue: nil) # rubocop:disable Metrics/MethodLength, Metrics/ParameterLists
35
44
  @observer = observer
36
45
 
37
46
  @concurrency = StrategyCollection.new(concurrency,
@@ -44,9 +53,9 @@ module Sidekiq
44
53
  name: name,
45
54
  key_suffix: key_suffix)
46
55
 
47
- return if @concurrency.any? || @threshold.any?
56
+ @requeue_options = Throttled.config.default_requeue_options.merge(requeue || {})
48
57
 
49
- raise ArgumentError, "Neither :concurrency nor :threshold given"
58
+ validate!
50
59
  end
51
60
 
52
61
  # @return [Boolean] whenever strategy has dynamic config
@@ -74,18 +83,124 @@ module Sidekiq
74
83
  false
75
84
  end
76
85
 
86
+ # @return [Proc, Symbol] How to requeue the throttled job
87
+ def requeue_with
88
+ requeue_options[:with]
89
+ end
90
+
91
+ # @return [String, nil] Name of the queue to re-queue the job to.
92
+ def requeue_to
93
+ requeue_options[:to]
94
+ end
95
+
96
+ # Return throttled job to be executed later. Implementation depends on the strategy's `requeue` options.
97
+ # @return [void]
98
+ def requeue_throttled(work) # rubocop:disable Metrics/MethodLength
99
+ # Resolve :with and :to options, calling them if they are Procs
100
+ job_args = JSON.parse(work.job)["args"]
101
+ with = requeue_with.respond_to?(:call) ? requeue_with.call(*job_args) : requeue_with
102
+ target_queue = calc_target_queue(work)
103
+
104
+ case with
105
+ when :enqueue
106
+ re_enqueue_throttled(work, target_queue)
107
+ when :schedule
108
+ # Find out when we will next be able to execute this job, and reschedule for then.
109
+ reschedule_throttled(work, target_queue)
110
+ else
111
+ raise "unrecognized :with option #{with}"
112
+ end
113
+ end
114
+
77
115
  # Marks job as being processed.
78
116
  # @return [void]
79
117
  def finalize!(jid, *job_args)
80
118
  @concurrency&.finalize!(jid, *job_args)
81
119
  end
82
120
 
83
- # Resets count of jobs of all avaliable strategies
121
+ # Resets count of jobs of all available strategies
84
122
  # @return [void]
85
123
  def reset!
86
124
  @concurrency&.reset!
87
125
  @threshold&.reset!
88
126
  end
127
+
128
+ private
129
+
130
+ def validate!
131
+ unless VALID_VALUES_FOR_REQUEUE_WITH.include?(@requeue_options[:with]) ||
132
+ @requeue_options[:with].respond_to?(:call)
133
+ raise ArgumentError, "requeue: #{@requeue_options[:with]} is not a valid value for :with"
134
+ end
135
+
136
+ raise ArgumentError, "Neither :concurrency nor :threshold given" unless @concurrency.any? || @threshold.any?
137
+ end
138
+
139
+ def calc_target_queue(work) # rubocop:disable Metrics/MethodLength
140
+ target = case requeue_to
141
+ when Proc, Method
142
+ requeue_to.call(*JSON.parse(work.job)["args"])
143
+ when NilClass
144
+ work.queue
145
+ when String, Symbol
146
+ requeue_to.to_s
147
+ else
148
+ raise ArgumentError, "Invalid argument for `to`"
149
+ end
150
+
151
+ target = work.queue if target.nil? || target.empty?
152
+
153
+ target.start_with?("queue:") ? target : "queue:#{target}"
154
+ end
155
+
156
+ # Push the job back to the head of the queue.
157
+ def re_enqueue_throttled(work, target_queue)
158
+ case work.class.name
159
+ when "Sidekiq::Pro::SuperFetch::UnitOfWork"
160
+ # Calls SuperFetch UnitOfWork's requeue to remove the job from the
161
+ # temporary queue and push job back to the head of the target queue, so that
162
+ # the job won't be tried immediately after it was requeued (in most cases).
163
+ work.queue = target_queue if target_queue
164
+ work.requeue
165
+ else
166
+ # This is the same operation Sidekiq performs upon `Sidekiq::Worker.perform_async` call.
167
+ Sidekiq.redis { |conn| conn.lpush(target_queue, work.job) }
168
+ end
169
+ end
170
+
171
+ def reschedule_throttled(work, target_queue)
172
+ message = JSON.parse(work.job)
173
+ job_class = message.fetch("wrapped") { message.fetch("class") { return false } }
174
+ job_args = message["args"]
175
+
176
+ # Re-enqueue the job to the target queue at another time as a NEW unit of work
177
+ # AND THEN mark this work as done, so SuperFetch doesn't think this instance is orphaned
178
+ # Technically, the job could be processed twice if the process dies between the two lines,
179
+ # but your job should be idempotent anyway, right?
180
+ # The job running twice was already a risk with SuperFetch anyway and this doesn't really increase that risk.
181
+ Sidekiq::Client.enqueue_to_in(target_queue, retry_in(work), Object.const_get(job_class), *job_args)
182
+ work.acknowledge
183
+ end
184
+
185
+ def retry_in(work)
186
+ message = JSON.parse(work.job)
187
+ jid = message.fetch("jid") { return false }
188
+ job_args = message["args"]
189
+
190
+ # Ask both concurrency and threshold, if relevant, how long minimum until we can retry.
191
+ # If we get two answers, take the longer one.
192
+ intervals = [@concurrency&.retry_in(jid, *job_args), @threshold&.retry_in(*job_args)].compact
193
+
194
+ raise "Cannot compute a valid retry interval" if intervals.empty?
195
+
196
+ interval = intervals.max
197
+
198
+ # Add a random amount of jitter, proportional to the length of the minimum retry time.
199
+ # This helps spread out jobs more evenly and avoid clumps of jobs on the queue.
200
+ interval += rand(interval / 5) if interval > 10
201
+
202
+ interval
203
+ end
89
204
  end
90
205
  end
91
206
  end
@@ -41,6 +41,11 @@ module Sidekiq
41
41
  any? { |s| s.throttled?(...) }
42
42
  end
43
43
 
44
+ # @return [Float] How long, in seconds, before we'll next be able to take on jobs
45
+ def retry_in(*args)
46
+ map { |s| s.retry_in(*args) }.max
47
+ end
48
+
44
49
  # Marks job as being processed.
45
50
  # @return [void]
46
51
  def finalize!(...)
@@ -3,6 +3,6 @@
3
3
  module Sidekiq
4
4
  module Throttled
5
5
  # Gem version
6
- VERSION = "1.4.0"
6
+ VERSION = "1.5.1"
7
7
  end
8
8
  end
@@ -54,6 +54,11 @@ module Sidekiq
54
54
  # @return [Cooldown, nil]
55
55
  attr_reader :cooldown
56
56
 
57
+ # @api internal
58
+ #
59
+ # @return [Config, nil]
60
+ attr_reader :config
61
+
57
62
  # @example
58
63
  # Sidekiq::Throttled.configure do |config|
59
64
  # config.cooldown_period = nil # Disable queues cooldown manager
@@ -88,15 +93,16 @@ module Sidekiq
88
93
  false
89
94
  end
90
95
 
91
- # @deprecated Will be removed in 2.0.0
92
- def setup!
93
- warn "Sidekiq::Throttled.setup! was deprecated"
96
+ # Return throttled job to be executed later, delegating the details of how to do that
97
+ # to the Strategy for that job.
98
+ #
99
+ # @return [void]
100
+ def requeue_throttled(work)
101
+ message = JSON.parse(work.job)
102
+ job_class = Object.const_get(message.fetch("wrapped") { message.fetch("class") { return false } })
94
103
 
95
- Sidekiq.configure_server do |config|
96
- config.server_middleware do |chain|
97
- chain.remove(Sidekiq::Throttled::Middlewares::Server)
98
- chain.add(Sidekiq::Throttled::Middlewares::Server)
99
- end
104
+ Registry.get job_class do |strategy|
105
+ strategy.requeue_throttled(work)
100
106
  end
101
107
  end
102
108
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: sidekiq-throttled
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.4.0
4
+ version: 1.5.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - Alexey Zapparov
8
8
  autorequire:
9
- bindir: exe
9
+ bindir: bin
10
10
  cert_chain: []
11
- date: 2024-04-07 00:00:00.000000000 Z
11
+ date: 2024-12-09 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: concurrent-ruby
@@ -90,9 +90,9 @@ licenses:
90
90
  - MIT
91
91
  metadata:
92
92
  homepage_uri: https://github.com/ixti/sidekiq-throttled
93
- source_code_uri: https://github.com/ixti/sidekiq-throttled/tree/v1.4.0
93
+ source_code_uri: https://github.com/ixti/sidekiq-throttled/tree/v1.5.1
94
94
  bug_tracker_uri: https://github.com/ixti/sidekiq-throttled/issues
95
- changelog_uri: https://github.com/ixti/sidekiq-throttled/blob/v1.4.0/CHANGES.md
95
+ changelog_uri: https://github.com/ixti/sidekiq-throttled/blob/v1.5.1/CHANGES.md
96
96
  rubygems_mfa_required: 'true'
97
97
  post_install_message:
98
98
  rdoc_options: []
@@ -109,7 +109,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
109
109
  - !ruby/object:Gem::Version
110
110
  version: '0'
111
111
  requirements: []
112
- rubygems_version: 3.5.4
112
+ rubygems_version: 3.4.22
113
113
  signing_key:
114
114
  specification_version: 4
115
115
  summary: Concurrency and rate-limit throttling for Sidekiq