sidekiq-throttled 1.5.0 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.adoc +27 -0
- data/lib/sidekiq/throttled/job.rb +0 -9
- data/lib/sidekiq/throttled/strategy.rb +60 -29
- data/lib/sidekiq/throttled/strategy_collection.rb +1 -1
- data/lib/sidekiq/throttled/version.rb +1 -1
- data/lib/sidekiq/throttled.rb +1 -1
- metadata +5 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 07f60ab84ab53725f6189d8a142ccb7d5596ea408a5d52087e2665420c92a276
+  data.tar.gz: 489d3d9a5a72dc20af8295d6942d4abdc9a27dfffdf917b753a08af78184d59e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: bac5ad071e0de8913f80f8c78cec87ee98c61da77a48c46000ba3680f0eeb684b2970c69d256b61cf5d1b680121f2762a0ae3c9220729ee25140ba6cbf3f9598
+  data.tar.gz: 92d08e2c0f76716816385b24df2c06092fa416ceb22714657967e2e312fe04357000db45fc18ee4f99d5eab57b6c267d93556a8064cb704478590b457ec48f2f
data/README.adoc
CHANGED
@@ -262,6 +262,33 @@ IMPORTANT: Don't forget to specify `:key_suffix` and make it return different
 values if you are using dynamic limit/period options. Otherwise, you risk
 getting into some trouble.
 
+[source,ruby]
+----
+class MyJob
+  include Sidekiq::Job
+  include Sidekiq::Throttled::Job
+
+  sidekiq_options queue: :my_queue
+
+  sidekiq_throttle(
+    concurrency: { limit: 10 },
+    # Allow 500 jobs per minute, 5,000 per hour, and 50,000 per day:
+    threshold: [
+      { limit: 500, period: 1.minute, key_suffix: "minutely" },
+      { limit: 5_000, period: 1.hour, key_suffix: "hourly" },
+      { limit: 50_000, period: 1.day, key_suffix: "daily" },
+    ]
+  )
+
+  def perform(project_id, user_id)
+    # ...
+  end
+end
+----
+
+NOTE: `key_suffix` does not have to be a proc/lambda, it can just be a
+string value. This can come in handy to set throttle limits for different
+ranges of time
 
 === Concurrency throttling fine-tuning
 
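The new README example uses static string `key_suffix` values. When the limits themselves are dynamic, the IMPORTANT note above about `:key_suffix` returning distinct values applies; a minimal sketch of that case, assuming the gem's documented proc-based `limit`/`key_suffix` options (which receive the job arguments) and a hypothetical `User#rate_limit` method:

[source,ruby]
----
class MyDynamicJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  sidekiq_throttle(
    threshold: {
      # Hypothetical per-user limit; both procs receive the job arguments.
      limit:      ->(user_id) { User.find(user_id).rate_limit },
      period:     1.minute,
      # Distinct suffix per user, so different users don't share one counter.
      key_suffix: ->(user_id) { user_id }
    }
  )

  def perform(user_id)
    # ...
  end
end
----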
data/lib/sidekiq/throttled/job.rb
CHANGED
@@ -24,8 +24,6 @@ module Sidekiq
     #
     # @see ClassMethods
     module Job
-      VALID_VALUES_FOR_REQUEUE_WITH = %i[enqueue schedule].freeze
-
       # Extends worker class with {ClassMethods}.
       #
       # @note Using `included` hook with extending worker with {ClassMethods}
@@ -91,13 +89,6 @@ module Sidekiq
       # @see Registry.add
       # @return [void]
       def sidekiq_throttle(**kwargs)
-        requeue_options = Throttled.config.default_requeue_options.merge(kwargs.delete(:requeue) || {})
-        unless VALID_VALUES_FOR_REQUEUE_WITH.include?(requeue_options[:with])
-          raise ArgumentError, "requeue: #{requeue_options[:with]} is not a valid value for :with"
-        end
-
-        self.sidekiq_throttled_requeue_options = requeue_options
-
         Registry.add(self, **kwargs)
       end
 
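The removed lines mean `sidekiq_throttle` no longer strips and validates `:requeue` itself; the option now stays in `kwargs`, reaches `Registry.add`, and is validated by the `Strategy` (see the strategy.rb hunks below). A minimal caller-side sketch, assuming the gem's documented `requeue: { with:, to: }` option and an example `:throttled_jobs` queue name:

[source,ruby]
----
class MyThrottledJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  sidekiq_options queue: :my_queue

  sidekiq_throttle(
    concurrency: { limit: 10 },
    # Passed through to Strategy#initialize and checked in Strategy#validate!:
    # :with must be :enqueue, :schedule, or something callable.
    requeue: { with: :schedule, to: :throttled_jobs }
  )

  def perform(*)
    # ...
  end
end
----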
data/lib/sidekiq/throttled/strategy.rb
CHANGED
@@ -11,7 +11,11 @@ module Sidekiq
     # Meta-strategy that couples {Concurrency} and {Threshold} strategies.
     #
     # @private
-    class Strategy
+    class Strategy # rubocop:disable Metrics/ClassLength
+      # :enqueue means put the job back at the end of the queue immediately
+      # :schedule means schedule enqueueing the job for a later time when we expect to have capacity
+      VALID_VALUES_FOR_REQUEUE_WITH = %i[enqueue schedule].freeze
+
       # @!attribute [r] concurrency
       # @return [Strategy::Concurrency, nil]
       attr_reader :concurrency
@@ -24,6 +28,10 @@ module Sidekiq
       # @return [Proc, nil]
       attr_reader :observer
 
+      # @!attribute [r] requeue_options
+      # @return [Hash, nil]
+      attr_reader :requeue_options
+
       # @param [#to_s] name
       # @param [Hash] concurrency Concurrency options.
       # See keyword args of {Strategy::Concurrency#initialize} for details.
@@ -31,7 +39,8 @@ module Sidekiq
       # See keyword args of {Strategy::Threshold#initialize} for details.
       # @param [#call] key_suffix Dynamic key suffix generator.
       # @param [#call] observer Process called after throttled.
-
+      # @param [#call] requeue What to do with jobs that are throttled.
+      def initialize(name, concurrency: nil, threshold: nil, key_suffix: nil, observer: nil, requeue: nil) # rubocop:disable Metrics/MethodLength, Metrics/ParameterLists
         @observer = observer
 
         @concurrency = StrategyCollection.new(concurrency,
@@ -44,7 +53,9 @@ module Sidekiq
           name: name,
           key_suffix: key_suffix)
 
-
+        @requeue_options = Throttled.config.default_requeue_options.merge(requeue || {})
+
+        validate!
       end
 
       # @return [Boolean] whenever strategy has dynamic config
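The `merge` above layers per-strategy `requeue:` options on top of `Throttled.config.default_requeue_options`. A sketch of how a global default could combine with a per-job override, assuming the gem's `Sidekiq::Throttled.configure` API exposes a `default_requeue_options` writer:

[source,ruby]
----
Sidekiq::Throttled.configure do |config|
  # Assumed writer: applies to every throttled job unless overridden.
  config.default_requeue_options = { with: :schedule }
end

class ReportJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  # Overrides :with only; :to is left nil, so requeue_to falls back to the
  # job's own queue inside calc_target_queue.
  sidekiq_throttle(
    threshold: { limit: 100, period: 1.minute },
    requeue:   { with: :enqueue }
  )
end
----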
@@ -72,26 +83,30 @@ module Sidekiq
         false
       end
 
-      #
-
-
-
-
-      # @
-
+      # @return [Proc, Symbol] How to requeue the throttled job
+      def requeue_with
+        requeue_options[:with]
+      end
+
+      # @return [String, nil] Name of the queue to re-queue the job to.
+      def requeue_to
+        requeue_options[:to]
+      end
+
+      # Return throttled job to be executed later. Implementation depends on the strategy's `requeue` options.
       # @return [void]
-      def requeue_throttled(work
-        # Resolve :with and :to
+      def requeue_throttled(work) # rubocop:disable Metrics/MethodLength
+        # Resolve :with and :to options, calling them if they are Procs
         job_args = JSON.parse(work.job)["args"]
-
-        target_queue = calc_target_queue(work
+        with = requeue_with.respond_to?(:call) ? requeue_with.call(*job_args) : requeue_with
+        target_queue = calc_target_queue(work)
 
-        case
+        case with
         when :enqueue
           re_enqueue_throttled(work, target_queue)
         when :schedule
           # Find out when we will next be able to execute this job, and reschedule for then.
-          reschedule_throttled(work,
+          reschedule_throttled(work, target_queue)
         else
           raise "unrecognized :with option #{with}"
         end
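Since `requeue_with` and `requeue_to` may now be callables invoked with the job's arguments, the requeue behaviour can vary per job. A hedged sketch with hypothetical argument names (`account_id`, `priority`) and hypothetical queue names:

[source,ruby]
----
class SyncJob
  include Sidekiq::Job
  include Sidekiq::Throttled::Job

  sidekiq_throttle(
    concurrency: { limit: 5 },
    requeue: {
      # Both procs receive the job args, mirroring requeue_with.call(*job_args) above.
      with: ->(_account_id, priority) { priority == "high" ? :enqueue : :schedule },
      to:   ->(_account_id, priority) { priority == "high" ? :urgent : :low }
    }
  )

  def perform(account_id, priority)
    # ...
  end
end
----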
@@ -112,49 +127,65 @@ module Sidekiq
 
       private
 
-      def
-
+      def validate!
+        unless VALID_VALUES_FOR_REQUEUE_WITH.include?(@requeue_options[:with]) ||
+               @requeue_options[:with].respond_to?(:call)
+          raise ArgumentError, "requeue: #{@requeue_options[:with]} is not a valid value for :with"
+        end
+
+        raise ArgumentError, "Neither :concurrency nor :threshold given" unless @concurrency.any? || @threshold.any?
+      end
+
+      def calc_target_queue(work) # rubocop:disable Metrics/MethodLength
+        target = case requeue_to
                  when Proc, Method
-
+                   requeue_to.call(*JSON.parse(work.job)["args"])
                  when NilClass
                    work.queue
                  when String, Symbol
-
+                   requeue_to
                  else
                    raise ArgumentError, "Invalid argument for `to`"
                  end
 
         target = work.queue if target.nil? || target.empty?
 
-        target.
+        target.to_s
       end
 
       # Push the job back to the head of the queue.
-
+      # The queue name is expected to include the "queue:" prefix, so we add it if it's missing.
+      def re_enqueue_throttled(work, target_queue)
+        target_queue = "queue:#{target_queue}" unless target_queue.start_with?("queue:")
+
         case work.class.name
         when "Sidekiq::Pro::SuperFetch::UnitOfWork"
           # Calls SuperFetch UnitOfWork's requeue to remove the job from the
           # temporary queue and push job back to the head of the target queue, so that
           # the job won't be tried immediately after it was requeued (in most cases).
-          work.queue =
+          work.queue = target_queue
           work.requeue
         else
          # This is the same operation Sidekiq performs upon `Sidekiq::Worker.perform_async` call.
-          Sidekiq.redis { |conn| conn.lpush(
+          Sidekiq.redis { |conn| conn.lpush(target_queue, work.job) }
         end
       end
 
-
-
-
-
+      # Reschedule the job to be executed later in the target queue.
+      # The queue name should NOT include the "queue:" prefix, so we remove it if it's present.
+      def reschedule_throttled(work, target_queue)
+        target_queue = target_queue.delete_prefix("queue:")
+        message = JSON.parse(work.job)
+        job_class = message.fetch("wrapped") { message.fetch("class") { return false } }
+        job_args = message["args"]
 
         # Re-enqueue the job to the target queue at another time as a NEW unit of work
         # AND THEN mark this work as done, so SuperFetch doesn't think this instance is orphaned
         # Technically, the job could processed twice if the process dies between the two lines,
         # but your job should be idempotent anyway, right?
         # The job running twice was already a risk with SuperFetch anyway and this doesn't really increase that risk.
-        Sidekiq::Client.enqueue_to_in(
+        Sidekiq::Client.enqueue_to_in(target_queue, retry_in(work), Object.const_get(job_class), *job_args)
+
         work.acknowledge
       end
 
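The two private requeue paths above expect different queue-name formats: the raw `lpush` path wants the Redis list key (with the `queue:` prefix), while `Sidekiq::Client.enqueue_to_in` wants the bare queue name. A standalone illustration of that normalization, using hypothetical helper names:

[source,ruby]
----
# Hypothetical helpers mirroring the normalization performed in
# re_enqueue_throttled and reschedule_throttled above.

def queue_key(name)   # for a direct LPUSH into Redis
  name.start_with?("queue:") ? name : "queue:#{name}"
end

def queue_name(name)  # for Sidekiq::Client.enqueue_to_in
  name.delete_prefix("queue:")
end

queue_key("default")        # => "queue:default"
queue_key("queue:default")  # => "queue:default"
queue_name("queue:default") # => "default"
queue_name("default")       # => "default"
----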
data/lib/sidekiq/throttled.rb
CHANGED
@@ -102,7 +102,7 @@ module Sidekiq
       job_class = Object.const_get(message.fetch("wrapped") { message.fetch("class") { return false } })
 
       Registry.get job_class do |strategy|
-        strategy.requeue_throttled(work
+        strategy.requeue_throttled(work)
       end
     end
   end
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: sidekiq-throttled
 version: !ruby/object:Gem::Version
-  version: 1.5.
+  version: 1.5.2
 platform: ruby
 authors:
 - Alexey Zapparov
 autorequire:
 bindir: bin
 cert_chain: []
-date:
+date: 2025-01-12 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: concurrent-ruby
@@ -90,9 +90,9 @@ licenses:
 - MIT
 metadata:
   homepage_uri: https://github.com/ixti/sidekiq-throttled
-  source_code_uri: https://github.com/ixti/sidekiq-throttled/tree/v1.5.
+  source_code_uri: https://github.com/ixti/sidekiq-throttled/tree/v1.5.2
   bug_tracker_uri: https://github.com/ixti/sidekiq-throttled/issues
-  changelog_uri: https://github.com/ixti/sidekiq-throttled/blob/v1.5.
+  changelog_uri: https://github.com/ixti/sidekiq-throttled/blob/v1.5.2/CHANGES.md
   rubygems_mfa_required: 'true'
 post_install_message:
 rdoc_options: []
@@ -109,7 +109,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.5.22
 signing_key:
 specification_version: 4
 summary: Concurrency and rate-limit throttling for Sidekiq