sidekiq 6.2.2 → 7.1.2
Potentially problematic release: this version of sidekiq has been flagged as possibly problematic.
- checksums.yaml +4 -4
- data/Changes.md +299 -11
- data/LICENSE.txt +9 -0
- data/README.md +45 -32
- data/bin/sidekiq +4 -9
- data/bin/sidekiqload +207 -117
- data/bin/sidekiqmon +4 -1
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +334 -190
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +95 -81
- data/lib/sidekiq/client.rb +102 -96
- data/lib/sidekiq/{util.rb → component.rb} +14 -41
- data/lib/sidekiq/config.rb +278 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +26 -26
- data/lib/sidekiq/job.rb +371 -5
- data/lib/sidekiq/job_logger.rb +16 -28
- data/lib/sidekiq/job_retry.rb +85 -59
- data/lib/sidekiq/job_util.rb +105 -0
- data/lib/sidekiq/launcher.rb +106 -94
- data/lib/sidekiq/logger.rb +9 -44
- data/lib/sidekiq/manager.rb +40 -41
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +136 -0
- data/lib/sidekiq/middleware/chain.rb +96 -51
- data/lib/sidekiq/middleware/current_attributes.rb +95 -0
- data/lib/sidekiq/middleware/i18n.rb +6 -4
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +17 -4
- data/lib/sidekiq/paginator.rb +17 -9
- data/lib/sidekiq/processor.rb +60 -60
- data/lib/sidekiq/rails.rb +29 -6
- data/lib/sidekiq/redis_client_adapter.rb +96 -0
- data/lib/sidekiq/redis_connection.rb +17 -88
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +101 -44
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +41 -68
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +47 -13
- data/lib/sidekiq/web/csrf_protection.rb +3 -3
- data/lib/sidekiq/web/helpers.rb +36 -33
- data/lib/sidekiq/web.rb +10 -17
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +86 -201
- data/sidekiq.gemspec +12 -10
- data/web/assets/javascripts/application.js +131 -60
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +36 -273
- data/web/assets/javascripts/metrics.js +264 -0
- data/web/assets/stylesheets/application-dark.css +23 -23
- data/web/assets/stylesheets/application-rtl.css +2 -95
- data/web/assets/stylesheets/application.css +73 -402
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +82 -69
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +81 -67
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +73 -68
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +63 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +6 -3
- data/web/views/_job_info.erb +18 -2
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +1 -1
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +44 -28
- data/web/views/dashboard.erb +44 -12
- data/web/views/layout.erb +1 -1
- data/web/views/metrics.erb +82 -0
- data/web/views/metrics_for_job.erb +68 -0
- data/web/views/morgue.erb +5 -9
- data/web/views/queue.erb +24 -24
- data/web/views/queues.erb +4 -2
- data/web/views/retries.erb +5 -9
- data/web/views/scheduled.erb +12 -13
- metadata +62 -31
- data/LICENSE +0 -9
- data/lib/generators/sidekiq/worker_generator.rb +0 -57
- data/lib/sidekiq/delay.rb +0 -41
- data/lib/sidekiq/exception_handler.rb +0 -27
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/worker.rb +0 -244
data/lib/sidekiq/config.rb
ADDED
@@ -0,0 +1,278 @@
+require "forwardable"
+
+require "set"
+require "sidekiq/redis_connection"
+
+module Sidekiq
+  # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
+  class Config
+    extend Forwardable
+
+    DEFAULTS = {
+      labels: Set.new,
+      require: ".",
+      environment: nil,
+      concurrency: 5,
+      timeout: 25,
+      poll_interval_average: nil,
+      average_scheduled_poll_interval: 5,
+      on_complex_arguments: :raise,
+      error_handlers: [],
+      death_handlers: [],
+      lifecycle_events: {
+        startup: [],
+        quiet: [],
+        shutdown: [],
+        # triggers when we fire the first heartbeat on startup OR repairing a network partition
+        heartbeat: [],
+        # triggers on EVERY heartbeat call, every 10 seconds
+        beat: []
+      },
+      dead_max_jobs: 10_000,
+      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
+      reloader: proc { |&block| block.call },
+      backtrace_cleaner: ->(backtrace) { backtrace }
+    }
+
+    ERROR_HANDLER = ->(ex, ctx) {
+      cfg = ctx[:_config] || Sidekiq.default_configuration
+      l = cfg.logger
+      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
+      l.warn("#{ex.class.name}: #{ex.message}")
+      unless ex.backtrace.nil?
+        backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
+        l.warn(backtrace.join("\n"))
+      end
+    }
+
+    def initialize(options = {})
+      @options = DEFAULTS.merge(options)
+      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
+      @directory = {}
+      @redis_config = {}
+      @capsules = {}
+    end
+
+    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
+    attr_reader :capsules
+
+    def to_json(*)
+      Sidekiq.dump_json(@options)
+    end
+
+    # LEGACY: edits the default capsule
+    # config.concurrency = 5
+    def concurrency=(val)
+      default_capsule.concurrency = Integer(val)
+    end
+
+    def concurrency
+      default_capsule.concurrency
+    end
+
+    def total_concurrency
+      capsules.each_value.sum(&:concurrency)
+    end
+
+    # Edit the default capsule.
+    # config.queues = %w( high default low )                 # strict
+    # config.queues = %w( high,3 default,2 low,1 )           # weighted
+    # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
+    #
+    # With weighted priority, queue will be checked first (weight / total) of the time.
+    # high will be checked first (3/6) or 50% of the time.
+    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
+    # are ridiculous and unnecessarily expensive. You can get random queue ordering
+    # by explicitly setting all weights to 1.
+    def queues=(val)
+      default_capsule.queues = val
+    end
+
+    def queues
+      default_capsule.queues
+    end
+
+    def client_middleware
+      @client_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @client_chain if block_given?
+      @client_chain
+    end
+
+    def server_middleware
+      @server_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @server_chain if block_given?
+      @server_chain
+    end
+
+    def default_capsule(&block)
+      capsule("default", &block)
+    end
+
+    # register a new queue processing subsystem
+    def capsule(name)
+      nm = name.to_s
+      cap = @capsules.fetch(nm) do
+        cap = Sidekiq::Capsule.new(nm, self)
+        @capsules[nm] = cap
+      end
+      yield cap if block_given?
+      cap
+    end
+
+    # All capsules must use the same Redis configuration
+    def redis=(hash)
+      @redis_config = @redis_config.merge(hash)
+    end
+
+    def redis_pool
+      Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
+    end
+
+    private def local_redis_pool
+      # this is our internal client/housekeeping pool. each capsule has its
+      # own pool for executing threads.
+      @redis ||= new_redis_pool(10, "internal")
+    end
+
+    def new_redis_pool(size, name = "unset")
+      # connection pool is lazy, it will not create connections unless you actually need them
+      # so don't be skimpy!
+      RedisConnection.create({size: size, logger: logger, pool_name: name}.merge(@redis_config))
+    end
+
+    def redis_info
+      redis do |conn|
+        conn.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
+      rescue RedisClientAdapter::CommandError => ex
+        # 2850 return fake version when INFO command has (probably) been renamed
+        raise unless /unknown command/.match?(ex.message)
+        {
+          "redis_version" => "9.9.9",
+          "uptime_in_days" => "9999",
+          "connected_clients" => "9999",
+          "used_memory_human" => "9P",
+          "used_memory_peak_human" => "9P"
+        }.freeze
+      end
+    end
+
+    def redis
+      raise ArgumentError, "requires a block" unless block_given?
+      redis_pool.with do |conn|
+        retryable = true
+        begin
+          yield conn
+        rescue RedisClientAdapter::BaseError => ex
+          # 2550 Failover can cause the server to become a replica, need
+          # to disconnect and reopen the socket to get back to the primary.
+          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+          # 4985 Use the same logic when a blocking command is force-unblocked
+          # The same retry logic is also used in client.rb
+          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+            conn.close
+            retryable = false
+            retry
+          end
+          raise
+        end
+      end
+    end
+
+    # register global singletons which can be accessed elsewhere
+    def register(name, instance)
+      @directory[name] = instance
+    end
+
+    # find a singleton
+    def lookup(name, default_class = nil)
+      # JNDI is just a fancy name for a hash lookup
+      @directory.fetch(name) do |key|
+        return nil unless default_class
+        @directory[key] = default_class.new(self)
+      end
+    end
+
+    ##
+    # Death handlers are called when all retries for a job have been exhausted and
+    # the job dies. It's the notification to your application
+    # that this job will not succeed without manual intervention.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.death_handlers << ->(job, ex) do
+    #     end
+    #   end
+    def death_handlers
+      @options[:death_handlers]
+    end
+
+    # How frequently Redis should be checked by a random Sidekiq process for
+    # scheduled and retriable jobs. Each individual process will take turns by
+    # waiting some multiple of this value.
+    #
+    # See sidekiq/scheduled.rb for an in-depth explanation of this value
+    def average_scheduled_poll_interval=(interval)
+      @options[:average_scheduled_poll_interval] = interval
+    end
+
+    # Register a proc to handle any error which occurs within the Sidekiq process.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
+    #   end
+    #
+    # The default error handler logs errors to @logger.
+    def error_handlers
+      @options[:error_handlers]
+    end
+
+    # Register a block to run at a point in the Sidekiq lifecycle.
+    # :startup, :quiet or :shutdown are valid events.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.on(:shutdown) do
+    #       puts "Goodbye cruel world!"
+    #     end
+    #   end
+    def on(event, &block)
+      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
+      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
+      @options[:lifecycle_events][event] << block
+    end
+
+    def logger
+      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
+        log.level = Logger::INFO
+        log.formatter = if ENV["DYNO"]
+          Sidekiq::Logger::Formatters::WithoutTimestamp.new
+        else
+          Sidekiq::Logger::Formatters::Pretty.new
+        end
+      end
+    end
+
+    def logger=(logger)
+      if logger.nil?
+        self.logger.level = Logger::FATAL
+        return
+      end
+
+      @logger = logger
+    end
+
+    # INTERNAL USE ONLY
+    def handle_exception(ex, ctx = {})
+      if @options[:error_handlers].size == 0
+        p ["!!!!!", ex]
+      end
+      ctx[:_config] = self
+      @options[:error_handlers].each do |handler|
+        handler.call(ex, ctx)
+      rescue Exception => e
+        l = logger
+        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
+        l.error e
+        l.error e.backtrace.join("\n") unless e.backtrace.nil?
+      end
+    end
+  end
+end
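Taken together, Config replaces Sidekiq 6's global options hash with a per-instance object plus one or more Capsules. A minimal usage sketch via the standard Sidekiq.configure_server block (the queue names, the "single" capsule, and MyErrorService are illustrative, not part of this diff):

Sidekiq.configure_server do |config|
  config.concurrency = 10                    # LEGACY path: edits the default capsule
  config.queues = %w[high,3 default,2 low,1] # weighted fetch on the default capsule

  # Register an extra capsule: an isolated unit of concurrency with its own queues.
  config.capsule("single") do |cap|
    cap.concurrency = 1
    cap.queues = %w[serial]
  end

  # Error/death handlers and lifecycle hooks are plain callables appended to the config.
  config.error_handlers << proc { |ex, ctx| MyErrorService.notify(ex, ctx) }
  config.death_handlers << ->(job, ex) { MyErrorService.notify(ex, job) }
  config.on(:shutdown) { config.logger.info "Goodbye cruel world!" }
end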
data/lib/sidekiq/deploy.rb
ADDED
@@ -0,0 +1,62 @@
+require "sidekiq/redis_connection"
+require "time"
+
+# This file is designed to be required within the user's
+# deployment script; it should need a bare minimum of dependencies.
+# Usage:
+#
+#   require "sidekiq/deploy"
+#   Sidekiq::Deploy.mark!("Some change")
+#
+# If you do not pass a label, Sidekiq will try to use the latest
+# git commit info.
+#
+
+module Sidekiq
+  class Deploy
+    MARK_TTL = 90 * 24 * 60 * 60 # 90 days
+
+    LABEL_MAKER = -> {
+      `git log -1 --format="%h %s"`.strip
+    }
+
+    def self.mark!(label = nil)
+      Sidekiq::Deploy.new.mark!(label: label)
+    end
+
+    def initialize(pool = Sidekiq::RedisConnection.create)
+      @pool = pool
+    end
+
+    def mark!(at: Time.now, label: nil)
+      label ||= LABEL_MAKER.call
+      # we need to round the timestamp so that we gracefully
+      # handle a very common error in marking deploys:
+      # having every process mark its deploy, leading
+      # to N marks for each deploy. Instead we round the time
+      # to the minute so that multiple marks within that minute
+      # will all naturally roll up into one mark per minute.
+      whence = at.utc
+      floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
+      datecode = floor.strftime("%Y%m%d")
+      key = "#{datecode}-marks"
+      stamp = floor.iso8601
+
+      @pool.with do |c|
+        # only allow one deploy mark for a given label for the next minute
+        lock = c.set("deploylock-#{label}", stamp, nx: true, ex: 60)
+        if lock
+          c.multi do |pipe|
+            pipe.hsetnx(key, stamp, label)
+            pipe.expire(key, MARK_TTL)
+          end
+        end
+      end
+    end
+
+    def fetch(date = Time.now.utc.to_date)
+      datecode = date.strftime("%Y%m%d")
+      @pool.with { |c| c.hgetall("#{datecode}-marks") }
+    end
+  end
+end
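Per the header comment, usage from a deploy script is two lines; a sketch (the label text is illustrative):

require "sidekiq/deploy"

Sidekiq::Deploy.mark!("v7.1.2 rollout")  # with no label, LABEL_MAKER shells out to git

# Later, read back a day's marks: a Hash of ISO8601 minute-stamps => labels.
Sidekiq::Deploy.new.fetch.each { |stamp, label| puts "#{stamp} #{label}" }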
data/lib/sidekiq/embedded.rb
ADDED
@@ -0,0 +1,61 @@
+require "sidekiq/component"
+require "sidekiq/launcher"
+require "sidekiq/metrics/tracking"
+
+module Sidekiq
+  class Embedded
+    include Sidekiq::Component
+
+    def initialize(config)
+      @config = config
+    end
+
+    def run
+      housekeeping
+      fire_event(:startup, reverse: false, reraise: true)
+      @launcher = Sidekiq::Launcher.new(@config, embedded: true)
+      @launcher.run
+      sleep 0.2 # pause to give threads time to spin up
+
+      logger.info "Sidekiq running embedded, total process thread count: #{Thread.list.size}"
+      logger.debug { Thread.list.map(&:name) }
+    end
+
+    def quiet
+      @launcher&.quiet
+    end
+
+    def stop
+      @launcher&.stop
+    end
+
+    private
+
+    def housekeeping
+      logger.info "Running in #{RUBY_DESCRIPTION}"
+      logger.info Sidekiq::LICENSE
+      logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
+
+      # touch the connection pool so it is created before we
+      # fire startup and start multithreading.
+      info = config.redis_info
+      ver = Gem::Version.new(info["redis_version"])
+      raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")
+
+      maxmemory_policy = info["maxmemory_policy"]
+      if maxmemory_policy != "noeviction"
+        logger.warn <<~EOM
+
+
+          WARNING: Your Redis instance will evict Sidekiq data under heavy load.
+          The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
+          See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory
+
+        EOM
+      end
+
+      logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
+      logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
+    end
+  end
+end
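This class backs Sidekiq 7's embedding support: running job processing inside an existing process instead of via the sidekiq CLI. A sketch of how a host process might drive it, assuming the Sidekiq.configure_embed entry point that yields a config and returns an Embedded instance (queue names are illustrative):

# In a long-running host process (e.g. an app server initializer or a script):
embed = Sidekiq.configure_embed do |config|
  config.queues = %w[critical default]
  config.concurrency = 2
end
embed.run    # housekeeping, :startup event, launcher threads
# ... the host process does its own work ...
embed.quiet  # optional: stop picking up new jobs
embed.stop   # graceful shutdown, same semantics as the CLI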
data/lib/sidekiq/fetch.rb
CHANGED
@@ -1,14 +1,17 @@
 # frozen_string_literal: true
 
 require "sidekiq"
+require "sidekiq/component"
+require "sidekiq/capsule"
 
-module Sidekiq
+module Sidekiq # :nodoc:
   class BasicFetch
+    include Sidekiq::Component
     # We want the fetch operation to timeout every few seconds so the thread
     # can check if the process is shutting down.
     TIMEOUT = 2
 
-    UnitOfWork = Struct.new(:queue, :job) {
+    UnitOfWork = Struct.new(:queue, :job, :config) {
       def acknowledge
         # nothing to do
       end
@@ -18,56 +21,53 @@ module Sidekiq
       end
 
       def requeue
-        Sidekiq.redis do |conn|
+        config.redis do |conn|
           conn.rpush(queue, job)
         end
       end
     }
 
-    def initialize(options)
-      raise ArgumentError, "missing queue list" unless options[:queues]
-      @options = options
-      @strictly_ordered_queues = !!@options[:strict]
-      @queues = @options[:queues].map { |q| "queue:#{q}" }
-      if @strictly_ordered_queues
-        @queues.uniq!
-        @queues << TIMEOUT
-      end
+    def initialize(cap)
+      raise ArgumentError, "missing queue list" unless cap.queues
+      @config = cap
+      @strictly_ordered_queues = cap.mode == :strict
+      @queues = config.queues.map { |q| "queue:#{q}" }
+      @queues.uniq! if @strictly_ordered_queues
     end
 
     def retrieve_work
       qs = queues_cmd
       # 4825 Sidekiq Pro with all queues paused will return an
-      # empty set of queues with a trailing TIMEOUT value.
-      if qs.size <= 1
+      # empty set of queues
+      if qs.size <= 0
         sleep(TIMEOUT)
         return nil
       end
 
-      work = Sidekiq.redis { |conn| conn.brpop(*qs) }
-      UnitOfWork.new(*work) if work
+      queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
+      UnitOfWork.new(queue, job, config) if queue
     end
 
-    def bulk_requeue(inprogress, options)
+    def bulk_requeue(inprogress)
       return if inprogress.empty?
 
-      Sidekiq.logger.debug { "Re-queueing terminated jobs" }
+      logger.debug { "Re-queueing terminated jobs" }
       jobs_to_requeue = {}
       inprogress.each do |unit_of_work|
        jobs_to_requeue[unit_of_work.queue] ||= []
        jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
       end
 
-      Sidekiq.redis do |conn|
-        conn.pipelined do
+      redis do |conn|
+        conn.pipelined do |pipeline|
           jobs_to_requeue.each do |queue, jobs|
-            conn.rpush(queue, jobs)
+            pipeline.rpush(queue, jobs)
           end
         end
       end
-      Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis")
+      logger.info("Pushed #{inprogress.size} jobs back to Redis")
     rescue => ex
-      Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
+      logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
     end
 
     # Creating the Redis#brpop command takes into account any
@@ -79,9 +79,9 @@ module Sidekiq
       if @strictly_ordered_queues
        @queues
       else
-        queues = @queues.shuffle.uniq
-        queues << TIMEOUT
-        queues
+        permute = @queues.shuffle
+        permute.uniq!
+        permute
       end
     end
   end