sidekiq 6.5.12 → 7.2.4
- checksums.yaml +4 -4
- data/Changes.md +224 -20
- data/README.md +43 -35
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +3 -8
- data/bin/sidekiqload +204 -118
- data/bin/sidekiqmon +3 -0
- data/lib/sidekiq/api.rb +187 -135
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +59 -75
- data/lib/sidekiq/client.rb +66 -37
- data/lib/sidekiq/component.rb +4 -1
- data/lib/sidekiq/config.rb +287 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +11 -14
- data/lib/sidekiq/job.rb +371 -10
- data/lib/sidekiq/job_logger.rb +2 -2
- data/lib/sidekiq/job_retry.rb +36 -18
- data/lib/sidekiq/job_util.rb +51 -15
- data/lib/sidekiq/launcher.rb +71 -65
- data/lib/sidekiq/logger.rb +2 -27
- data/lib/sidekiq/manager.rb +9 -11
- data/lib/sidekiq/metrics/query.rb +7 -4
- data/lib/sidekiq/metrics/shared.rb +8 -7
- data/lib/sidekiq/metrics/tracking.rb +27 -21
- data/lib/sidekiq/middleware/chain.rb +19 -18
- data/lib/sidekiq/middleware/current_attributes.rb +52 -20
- data/lib/sidekiq/monitor.rb +16 -3
- data/lib/sidekiq/paginator.rb +2 -2
- data/lib/sidekiq/processor.rb +46 -51
- data/lib/sidekiq/rails.rb +15 -10
- data/lib/sidekiq/redis_client_adapter.rb +23 -66
- data/lib/sidekiq/redis_connection.rb +15 -117
- data/lib/sidekiq/scheduled.rb +22 -23
- data/lib/sidekiq/testing.rb +32 -41
- data/lib/sidekiq/transaction_aware_client.rb +11 -5
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +8 -3
- data/lib/sidekiq/web/application.rb +108 -15
- data/lib/sidekiq/web/csrf_protection.rb +10 -7
- data/lib/sidekiq/web/helpers.rb +52 -38
- data/lib/sidekiq/web.rb +17 -16
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +76 -274
- data/sidekiq.gemspec +12 -10
- data/web/assets/javascripts/application.js +39 -0
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/dashboard-charts.js +182 -0
- data/web/assets/javascripts/dashboard.js +10 -232
- data/web/assets/javascripts/metrics.js +151 -115
- data/web/assets/stylesheets/application-dark.css +4 -0
- data/web/assets/stylesheets/application-rtl.css +10 -89
- data/web/assets/stylesheets/application.css +45 -298
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +2 -7
- data/web/locales/en.yml +78 -70
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +81 -67
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +67 -69
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +79 -69
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +20 -18
- data/web/locales/zh-tw.yml +10 -1
- data/web/views/_footer.erb +17 -2
- data/web/views/_job_info.erb +18 -2
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +1 -1
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +46 -35
- data/web/views/dashboard.erb +26 -5
- data/web/views/filtering.erb +7 -0
- data/web/views/metrics.erb +46 -24
- data/web/views/metrics_for_job.erb +41 -69
- data/web/views/morgue.erb +5 -9
- data/web/views/queue.erb +10 -14
- data/web/views/queues.erb +9 -3
- data/web/views/retries.erb +5 -9
- data/web/views/scheduled.erb +12 -13
- metadata +44 -38
- data/lib/sidekiq/delay.rb +0 -43
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/metrics/deploy.rb +0 -47
- data/lib/sidekiq/worker.rb +0 -370
- data/web/assets/javascripts/graph.js +0 -16
- /data/{LICENSE → LICENSE.txt} +0 -0
data/lib/sidekiq/config.rb
ADDED
@@ -0,0 +1,287 @@
+require "forwardable"
+
+require "set"
+require "sidekiq/redis_connection"
+
+module Sidekiq
+  # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
+  class Config
+    extend Forwardable
+
+    DEFAULTS = {
+      labels: Set.new,
+      require: ".",
+      environment: nil,
+      concurrency: 5,
+      timeout: 25,
+      poll_interval_average: nil,
+      average_scheduled_poll_interval: 5,
+      on_complex_arguments: :raise,
+      error_handlers: [],
+      death_handlers: [],
+      lifecycle_events: {
+        startup: [],
+        quiet: [],
+        shutdown: [],
+        # triggers when we fire the first heartbeat on startup OR repairing a network partition
+        heartbeat: [],
+        # triggers on EVERY heartbeat call, every 10 seconds
+        beat: []
+      },
+      dead_max_jobs: 10_000,
+      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
+      reloader: proc { |&block| block.call },
+      backtrace_cleaner: ->(backtrace) { backtrace }
+    }
+
+    ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.default_configuration) {
+      l = cfg.logger
+      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
+      l.warn("#{ex.class.name}: #{ex.message}")
+      unless ex.backtrace.nil?
+        backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
+        l.warn(backtrace.join("\n"))
+      end
+    }
+
+    def initialize(options = {})
+      @options = DEFAULTS.merge(options)
+      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
+      @directory = {}
+      @redis_config = {}
+      @capsules = {}
+    end
+
+    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
+    attr_reader :capsules
+
+    def to_json(*)
+      Sidekiq.dump_json(@options)
+    end
+
+    # LEGACY: edits the default capsule
+    # config.concurrency = 5
+    def concurrency=(val)
+      default_capsule.concurrency = Integer(val)
+    end
+
+    def concurrency
+      default_capsule.concurrency
+    end
+
+    def total_concurrency
+      capsules.each_value.sum(&:concurrency)
+    end
+
+    # Edit the default capsule.
+    # config.queues = %w( high default low ) # strict
+    # config.queues = %w( high,3 default,2 low,1 ) # weighted
+    # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
+    #
+    # With weighted priority, queue will be checked first (weight / total) of the time.
+    # high will be checked first (3/6) or 50% of the time.
+    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
+    # are ridiculous and unnecessarily expensive. You can get random queue ordering
+    # by explicitly setting all weights to 1.
+    def queues=(val)
+      default_capsule.queues = val
+    end
+
+    def queues
+      default_capsule.queues
+    end
+
+    def client_middleware
+      @client_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @client_chain if block_given?
+      @client_chain
+    end
+
+    def server_middleware
+      @server_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @server_chain if block_given?
+      @server_chain
+    end
+
+    def default_capsule(&block)
+      capsule("default", &block)
+    end
+
+    # register a new queue processing subsystem
+    def capsule(name)
+      nm = name.to_s
+      cap = @capsules.fetch(nm) do
+        cap = Sidekiq::Capsule.new(nm, self)
+        @capsules[nm] = cap
+      end
+      yield cap if block_given?
+      cap
+    end
+
+    # All capsules must use the same Redis configuration
+    def redis=(hash)
+      @redis_config = @redis_config.merge(hash)
+    end
+
+    def redis_pool
+      Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
+    end
+
+    private def local_redis_pool
+      # this is our internal client/housekeeping pool. each capsule has its
+      # own pool for executing threads.
+      @redis ||= new_redis_pool(10, "internal")
+    end
+
+    def new_redis_pool(size, name = "unset")
+      # connection pool is lazy, it will not create connections unless you actually need them
+      # so don't be skimpy!
+      RedisConnection.create({size: size, logger: logger, pool_name: name}.merge(@redis_config))
+    end
+
+    def redis_info
+      redis do |conn|
+        conn.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
+      rescue RedisClientAdapter::CommandError => ex
+        # 2850 return fake version when INFO command has (probably) been renamed
+        raise unless /unknown command/.match?(ex.message)
+        {
+          "redis_version" => "9.9.9",
+          "uptime_in_days" => "9999",
+          "connected_clients" => "9999",
+          "used_memory_human" => "9P",
+          "used_memory_peak_human" => "9P"
+        }.freeze
+      end
+    end
+
+    def redis
+      raise ArgumentError, "requires a block" unless block_given?
+      redis_pool.with do |conn|
+        retryable = true
+        begin
+          yield conn
+        rescue RedisClientAdapter::BaseError => ex
+          # 2550 Failover can cause the server to become a replica, need
+          # to disconnect and reopen the socket to get back to the primary.
+          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+          # 4985 Use the same logic when a blocking command is force-unblocked
+          # The same retry logic is also used in client.rb
+          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+            conn.close
+            retryable = false
+            retry
+          end
+          raise
+        end
+      end
+    end
+
+    # register global singletons which can be accessed elsewhere
+    def register(name, instance)
+      @directory[name] = instance
+    end
+
+    # find a singleton
+    def lookup(name, default_class = nil)
+      # JNDI is just a fancy name for a hash lookup
+      @directory.fetch(name) do |key|
+        return nil unless default_class
+        @directory[key] = default_class.new(self)
+      end
+    end
+
+    ##
+    # Death handlers are called when all retries for a job have been exhausted and
+    # the job dies. It's the notification to your application
+    # that this job will not succeed without manual intervention.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.death_handlers << ->(job, ex) do
+    #     end
+    #   end
+    def death_handlers
+      @options[:death_handlers]
+    end
+
+    # How frequently Redis should be checked by a random Sidekiq process for
+    # scheduled and retriable jobs. Each individual process will take turns by
+    # waiting some multiple of this value.
+    #
+    # See sidekiq/scheduled.rb for an in-depth explanation of this value
+    def average_scheduled_poll_interval=(interval)
+      @options[:average_scheduled_poll_interval] = interval
+    end
+
+    # Register a proc to handle any error which occurs within the Sidekiq process.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
+    #   end
+    #
+    # The default error handler logs errors to @logger.
+    def error_handlers
+      @options[:error_handlers]
+    end
+
+    # Register a block to run at a point in the Sidekiq lifecycle.
+    # :startup, :quiet or :shutdown are valid events.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.on(:shutdown) do
+    #       puts "Goodbye cruel world!"
+    #     end
+    #   end
+    def on(event, &block)
+      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
+      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
+      @options[:lifecycle_events][event] << block
+    end
+
+    def logger
+      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
+        log.level = Logger::INFO
+        log.formatter = if ENV["DYNO"]
+          Sidekiq::Logger::Formatters::WithoutTimestamp.new
+        else
+          Sidekiq::Logger::Formatters::Pretty.new
+        end
+      end
+    end
+
+    def logger=(logger)
+      if logger.nil?
+        self.logger.level = Logger::FATAL
+        return
+      end
+
+      @logger = logger
+    end
+
+    private def parameter_size(handler)
+      target = handler.is_a?(Proc) ? handler : handler.method(:call)
+      target.parameters.size
+    end
+
+    # INTERNAL USE ONLY
+    def handle_exception(ex, ctx = {})
+      if @options[:error_handlers].size == 0
+        p ["!!!!!", ex]
+      end
+      @options[:error_handlers].each do |handler|
+        if parameter_size(handler) == 2
+          # TODO Remove in 8.0
+          logger.info { "DEPRECATION: Sidekiq exception handlers now take three arguments, see #{handler}" }
+          handler.call(ex, {_config: self}.merge(ctx))
+        else
+          handler.call(ex, ctx, self)
+        end
+      rescue Exception => e
+        l = logger
+        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
+        l.error e
+        l.error e.backtrace.join("\n") unless e.backtrace.nil?
+      end
+    end
+  end
+end
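Editor's note: the capsule API above replaces Sidekiq 6's flat options hash. As a quick orientation (not part of the diff), here is a minimal sketch of configuring the default capsule plus an extra one; the "serial" capsule name, queue names, and concurrency values are illustrative:

  Sidekiq.configure_server do |config|
    # edits the default capsule via the legacy-style setters shown above
    config.queues = %w[high,3 default,2 low,1] # weighted ordering
    config.concurrency = 10

    # a second, independent queue processing subsystem in the same process;
    # concurrency 1 makes its jobs run one at a time
    config.capsule("serial") do |cap|
      cap.queues = %w[serial]
      cap.concurrency = 1
    end
  end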
data/lib/sidekiq/deploy.rb
ADDED
@@ -0,0 +1,62 @@
+require "sidekiq/redis_connection"
+require "time"
+
+# This file is designed to be required within the user's
+# deployment script; it should need a bare minimum of dependencies.
+# Usage:
+#
+#   require "sidekiq/deploy"
+#   Sidekiq::Deploy.mark!("Some change")
+#
+# If you do not pass a label, Sidekiq will try to use the latest
+# git commit info.
+#
+
+module Sidekiq
+  class Deploy
+    MARK_TTL = 90 * 24 * 60 * 60 # 90 days
+
+    LABEL_MAKER = -> {
+      `git log -1 --format="%h %s"`.strip
+    }
+
+    def self.mark!(label = nil)
+      Sidekiq::Deploy.new.mark!(label: label)
+    end
+
+    def initialize(pool = Sidekiq::RedisConnection.create)
+      @pool = pool
+    end
+
+    def mark!(at: Time.now, label: nil)
+      label ||= LABEL_MAKER.call
+      # we need to round the timestamp so that we gracefully
+      # handle a very common error in marking deploys:
+      # having every process mark its deploy, leading
+      # to N marks for each deploy. Instead we round the time
+      # to the minute so that multiple marks within that minute
+      # will all naturally roll up into one mark per minute.
+      whence = at.utc
+      floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
+      datecode = floor.strftime("%Y%m%d")
+      key = "#{datecode}-marks"
+      stamp = floor.iso8601
+
+      @pool.with do |c|
+        # only allow one deploy mark for a given label for the next minute
+        lock = c.set("deploylock-#{label}", stamp, "nx", "ex", "60")
+        if lock
+          c.multi do |pipe|
+            pipe.hsetnx(key, stamp, label)
+            pipe.expire(key, MARK_TTL)
+          end
+        end
+      end
+    end
+
+    def fetch(date = Time.now.utc.to_date)
+      datecode = date.strftime("%Y%m%d")
+      @pool.with { |c| c.hgetall("#{datecode}-marks") }
+    end
+  end
+end
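Editor's note: this class replaces the removed data/lib/sidekiq/metrics/deploy.rb, and its marks are what the Web UI metrics charts annotate. A usage sketch (the label string is illustrative; fetch returns a hash of ISO-8601 minute timestamps to labels for the given UTC date):

  require "sidekiq/deploy"

  # from a deploy script; omit the label to fall back to the latest git commit
  Sidekiq::Deploy.mark!("v7.2.4 rollout")

  # read back today's marks
  Sidekiq::Deploy.new.fetch.each do |stamp, label|
    puts "#{stamp}  #{label}"
  end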
data/lib/sidekiq/embedded.rb
ADDED
@@ -0,0 +1,61 @@
+require "sidekiq/component"
+require "sidekiq/launcher"
+require "sidekiq/metrics/tracking"
+
+module Sidekiq
+  class Embedded
+    include Sidekiq::Component
+
+    def initialize(config)
+      @config = config
+    end
+
+    def run
+      housekeeping
+      fire_event(:startup, reverse: false, reraise: true)
+      @launcher = Sidekiq::Launcher.new(@config, embedded: true)
+      @launcher.run
+      sleep 0.2 # pause to give threads time to spin up
+
+      logger.info "Sidekiq running embedded, total process thread count: #{Thread.list.size}"
+      logger.debug { Thread.list.map(&:name) }
+    end
+
+    def quiet
+      @launcher&.quiet
+    end
+
+    def stop
+      @launcher&.stop
+    end
+
+    private
+
+    def housekeeping
+      logger.info "Running in #{RUBY_DESCRIPTION}"
+      logger.info Sidekiq::LICENSE
+      logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
+
+      # touch the connection pool so it is created before we
+      # fire startup and start multithreading.
+      info = config.redis_info
+      ver = Gem::Version.new(info["redis_version"])
+      raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")
+
+      maxmemory_policy = info["maxmemory_policy"]
+      if maxmemory_policy != "noeviction"
+        logger.warn <<~EOM
+
+
+          WARNING: Your Redis instance will evict Sidekiq data under heavy load.
+          The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
+          See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory
+
+        EOM
+      end
+
+      logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
+      logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
+    end
+  end
+end
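Editor's note: embedded mode runs Sidekiq's worker threads inside another Ruby process instead of a dedicated sidekiq binary. A minimal sketch, assuming Sidekiq 7's Sidekiq.configure_embed entry point (which returns an Embedded instance like the one above); keep concurrency low since the host process shares its resources:

  # e.g. inside a puma worker or a plain Ruby process
  embed = Sidekiq.configure_embed do |config|
    config.queues = %w[default]
    config.concurrency = 2
  end
  embed.run    # housekeeping, fires :startup, launcher boots worker threads
  # ... host process does its normal work ...
  embed.quiet  # optional: stop picking up new jobs
  embed.stop   # graceful shutdown via the launcher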
data/lib/sidekiq/fetch.rb
CHANGED
@@ -2,6 +2,7 @@

require "sidekiq"
require "sidekiq/component"
+require "sidekiq/capsule"

module Sidekiq # :nodoc:
  class BasicFetch
@@ -26,31 +27,28 @@ module Sidekiq # :nodoc:
      end
    }

-    def initialize(options)
-      raise ArgumentError, "missing queue list" unless options[:queues]
-      @config = options
-      @strictly_ordered_queues = !!@config[:strict]
-      @queues = @config[:queues].map { |q| "queue:#{q}" }
-      if @strictly_ordered_queues
-        @queues.uniq!
-        @queues << {timeout: TIMEOUT}
-      end
+    def initialize(cap)
+      raise ArgumentError, "missing queue list" unless cap.queues
+      @config = cap
+      @strictly_ordered_queues = cap.mode == :strict
+      @queues = config.queues.map { |q| "queue:#{q}" }
+      @queues.uniq! if @strictly_ordered_queues
    end

    def retrieve_work
      qs = queues_cmd
      # 4825 Sidekiq Pro with all queues paused will return an
-      # empty set of queues with a trailing TIMEOUT value.
-      if qs.size <= 1
+      # empty set of queues
+      if qs.size <= 0
        sleep(TIMEOUT)
        return nil
      end

-      queue, job = redis { |conn| conn.brpop(*qs) }
+      queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
      UnitOfWork.new(queue, job, config) if queue
    end

-    def bulk_requeue(inprogress, options)
+    def bulk_requeue(inprogress)
      return if inprogress.empty?

      logger.debug { "Re-queueing terminated jobs" }
@@ -83,7 +81,6 @@ module Sidekiq # :nodoc:
      else
        permute = @queues.shuffle
        permute.uniq!
-        permute << {timeout: TIMEOUT}
        permute
      end
    end