sidekiq 6.4.1 → 7.0.0
- checksums.yaml +4 -4
- data/Changes.md +107 -5
- data/README.md +14 -13
- data/bin/sidekiq +3 -8
- data/bin/sidekiqload +26 -29
- data/lib/sidekiq/api.rb +232 -157
- data/lib/sidekiq/capsule.rb +110 -0
- data/lib/sidekiq/cli.rb +80 -86
- data/lib/sidekiq/client.rb +54 -42
- data/lib/sidekiq/component.rb +66 -0
- data/lib/sidekiq/config.rb +271 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +20 -19
- data/lib/sidekiq/job.rb +375 -10
- data/lib/sidekiq/job_logger.rb +1 -1
- data/lib/sidekiq/job_retry.rb +74 -53
- data/lib/sidekiq/job_util.rb +17 -11
- data/lib/sidekiq/launcher.rb +63 -69
- data/lib/sidekiq/logger.rb +6 -45
- data/lib/sidekiq/manager.rb +33 -32
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +134 -0
- data/lib/sidekiq/middleware/chain.rb +84 -42
- data/lib/sidekiq/middleware/current_attributes.rb +18 -17
- data/lib/sidekiq/middleware/i18n.rb +6 -4
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +1 -1
- data/lib/sidekiq/paginator.rb +10 -2
- data/lib/sidekiq/processor.rb +56 -59
- data/lib/sidekiq/rails.rb +10 -9
- data/lib/sidekiq/redis_client_adapter.rb +118 -0
- data/lib/sidekiq/redis_connection.rb +13 -82
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +65 -37
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +41 -68
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +22 -6
- data/lib/sidekiq/web/csrf_protection.rb +3 -3
- data/lib/sidekiq/web/helpers.rb +21 -19
- data/lib/sidekiq/web.rb +3 -14
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +84 -207
- data/sidekiq.gemspec +29 -5
- data/web/assets/javascripts/application.js +58 -26
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +3 -240
- data/web/assets/javascripts/metrics.js +236 -0
- data/web/assets/stylesheets/application-rtl.css +2 -91
- data/web/assets/stylesheets/application.css +64 -297
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +52 -52
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +82 -69
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +67 -67
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +71 -68
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +63 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +37 -11
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +5 -2
- data/web/views/_nav.erb +1 -1
- data/web/views/_summary.erb +1 -1
- data/web/views/busy.erb +9 -4
- data/web/views/dashboard.erb +36 -4
- data/web/views/metrics.erb +80 -0
- data/web/views/metrics_for_job.erb +69 -0
- data/web/views/queue.erb +5 -1
- metadata +69 -22
- data/lib/sidekiq/delay.rb +0 -43
- data/lib/sidekiq/exception_handler.rb +0 -27
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/util.rb +0 -108
- data/lib/sidekiq/worker.rb +0 -362
- /data/{LICENSE → LICENSE.txt} +0 -0
data/lib/sidekiq/component.rb
ADDED
@@ -0,0 +1,66 @@
+module Sidekiq
+  ##
+  # Sidekiq::Component assumes a config instance is available at @config
+  module Component # :nodoc:
+    attr_reader :config
+
+    def watchdog(last_words)
+      yield
+    rescue Exception => ex
+      handle_exception(ex, {context: last_words})
+      raise ex
+    end
+
+    def safe_thread(name, &block)
+      Thread.new do
+        Thread.current.name = name
+        watchdog(name, &block)
+      end
+    end
+
+    def logger
+      config.logger
+    end
+
+    def redis(&block)
+      config.redis(&block)
+    end
+
+    def tid
+      Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+    end
+
+    def hostname
+      ENV["DYNO"] || Socket.gethostname
+    end
+
+    def process_nonce
+      @@process_nonce ||= SecureRandom.hex(6)
+    end
+
+    def identity
+      @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}"
+    end
+
+    def handle_exception(ex, ctx = {})
+      config.handle_exception(ex, ctx)
+    end
+
+    def fire_event(event, options = {})
+      oneshot = options.fetch(:oneshot, true)
+      reverse = options[:reverse]
+      reraise = options[:reraise]
+      logger.debug("Firing #{event} event") if oneshot
+
+      arr = config[:lifecycle_events][event]
+      arr.reverse! if reverse
+      arr.each do |block|
+        block.call
+      rescue => ex
+        handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
+        raise ex if reraise
+      end
+      arr.clear if oneshot # once we've fired an event, we never fire it again
+    end
+  end
+end
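For context, this mixin is shared plumbing: anything that stores a config object in @config gets logging, Redis access, named threads, and error handling for free. A minimal sketch of how a server-side helper might lean on it; the Poller class and queue name below are hypothetical, not part of the gem:

require "sidekiq/component"

# Hypothetical helper that reuses the Component plumbing.
class Poller
  include Sidekiq::Component

  def initialize(config)
    @config = config # Component expects the config to live at @config
  end

  def start
    # safe_thread names the thread and routes any crash through handle_exception
    @thread = safe_thread("poller") do
      redis { |conn| conn.llen("queue:default") } # borrows the config's Redis pool
    end
  end
end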
data/lib/sidekiq/config.rb
ADDED
@@ -0,0 +1,271 @@
+require "forwardable"
+
+require "set"
+require "sidekiq/redis_connection"
+
+module Sidekiq
+  # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
+  class Config
+    extend Forwardable
+
+    DEFAULTS = {
+      labels: Set.new,
+      require: ".",
+      environment: nil,
+      concurrency: 5,
+      timeout: 25,
+      poll_interval_average: nil,
+      average_scheduled_poll_interval: 5,
+      on_complex_arguments: :raise,
+      error_handlers: [],
+      death_handlers: [],
+      lifecycle_events: {
+        startup: [],
+        quiet: [],
+        shutdown: [],
+        # triggers when we fire the first heartbeat on startup OR repairing a network partition
+        heartbeat: [],
+        # triggers on EVERY heartbeat call, every 10 seconds
+        beat: []
+      },
+      dead_max_jobs: 10_000,
+      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
+      reloader: proc { |&block| block.call }
+    }
+
+    ERROR_HANDLER = ->(ex, ctx) {
+      cfg = ctx[:_config] || Sidekiq.default_configuration
+      l = cfg.logger
+      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
+      l.warn("#{ex.class.name}: #{ex.message}")
+      l.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil?
+    }
+
+    def initialize(options = {})
+      @options = DEFAULTS.merge(options)
+      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
+      @directory = {}
+      @redis_config = {}
+      @capsules = {}
+    end
+
+    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
+    attr_reader :capsules
+
+    # LEGACY: edits the default capsule
+    # config.concurrency = 5
+    def concurrency=(val)
+      default_capsule.concurrency = Integer(val)
+    end
+
+    def concurrency
+      default_capsule.concurrency
+    end
+
+    def total_concurrency
+      capsules.each_value.sum(&:concurrency)
+    end
+
+    # Edit the default capsule.
+    # config.queues = %w( high default low ) # strict
+    # config.queues = %w( high,3 default,2 low,1 ) # weighted
+    # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
+    #
+    # With weighted priority, queue will be checked first (weight / total) of the time.
+    # high will be checked first (3/6) or 50% of the time.
+    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
+    # are ridiculous and unnecessarily expensive. You can get random queue ordering
+    # by explicitly setting all weights to 1.
+    def queues=(val)
+      default_capsule.queues = val
+    end
+
+    def queues
+      default_capsule.queues
+    end
+
+    def client_middleware
+      @client_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @client_chain if block_given?
+      @client_chain
+    end
+
+    def server_middleware
+      @server_chain ||= Sidekiq::Middleware::Chain.new(self)
+      yield @server_chain if block_given?
+      @server_chain
+    end
+
+    def default_capsule(&block)
+      capsule("default", &block)
+    end
+
+    # register a new queue processing subsystem
+    def capsule(name)
+      nm = name.to_s
+      cap = @capsules.fetch(nm) do
+        cap = Sidekiq::Capsule.new(nm, self)
+        @capsules[nm] = cap
+      end
+      yield cap if block_given?
+      cap
+    end
+
+    # All capsules must use the same Redis configuration
+    def redis=(hash)
+      @redis_config = @redis_config.merge(hash)
+    end
+
+    def redis_pool
+      Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
+    end
+
+    private def local_redis_pool
+      # this is our internal client/housekeeping pool. each capsule has its
+      # own pool for executing threads.
+      @redis ||= new_redis_pool(5, "internal")
+    end
+
+    def new_redis_pool(size, name = "unset")
+      # connection pool is lazy, it will not create connections unless you actually need them
+      # so don't be skimpy!
+      RedisConnection.create(@redis_config.merge(size: size, logger: logger, pool_name: name))
+    end
+
+    def redis_info
+      redis do |conn|
+        conn.info
+      rescue RedisClientAdapter::CommandError => ex
+        # 2850 return fake version when INFO command has (probably) been renamed
+        raise unless /unknown command/.match?(ex.message)
+        {
+          "redis_version" => "9.9.9",
+          "uptime_in_days" => "9999",
+          "connected_clients" => "9999",
+          "used_memory_human" => "9P",
+          "used_memory_peak_human" => "9P"
+        }.freeze
+      end
+    end
+
+    def redis
+      raise ArgumentError, "requires a block" unless block_given?
+      redis_pool.with do |conn|
+        retryable = true
+        begin
+          yield conn
+        rescue RedisClientAdapter::BaseError => ex
+          # 2550 Failover can cause the server to become a replica, need
+          # to disconnect and reopen the socket to get back to the primary.
+          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+          # 4985 Use the same logic when a blocking command is force-unblocked
+          # The same retry logic is also used in client.rb
+          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+            conn.close
+            retryable = false
+            retry
+          end
+          raise
+        end
+      end
+    end
+
+    # register global singletons which can be accessed elsewhere
+    def register(name, instance)
+      @directory[name] = instance
+    end
+
+    # find a singleton
+    def lookup(name, default_class = nil)
+      # JNDI is just a fancy name for a hash lookup
+      @directory.fetch(name) do |key|
+        return nil unless default_class
+        @directory[key] = default_class.new(self)
+      end
+    end
+
+    ##
+    # Death handlers are called when all retries for a job have been exhausted and
+    # the job dies. It's the notification to your application
+    # that this job will not succeed without manual intervention.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.death_handlers << ->(job, ex) do
+    #     end
+    #   end
+    def death_handlers
+      @options[:death_handlers]
+    end
+
+    # How frequently Redis should be checked by a random Sidekiq process for
+    # scheduled and retriable jobs. Each individual process will take turns by
+    # waiting some multiple of this value.
+    #
+    # See sidekiq/scheduled.rb for an in-depth explanation of this value
+    def average_scheduled_poll_interval=(interval)
+      @options[:average_scheduled_poll_interval] = interval
+    end
+
+    # Register a proc to handle any error which occurs within the Sidekiq process.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
+    #   end
+    #
+    # The default error handler logs errors to @logger.
+    def error_handlers
+      @options[:error_handlers]
+    end
+
+    # Register a block to run at a point in the Sidekiq lifecycle.
+    # :startup, :quiet or :shutdown are valid events.
+    #
+    #   Sidekiq.configure_server do |config|
+    #     config.on(:shutdown) do
+    #       puts "Goodbye cruel world!"
+    #     end
+    #   end
+    def on(event, &block)
+      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
+      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
+      @options[:lifecycle_events][event] << block
+    end
+
+    def logger
+      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
+        log.level = Logger::INFO
+        log.formatter = if ENV["DYNO"]
+          Sidekiq::Logger::Formatters::WithoutTimestamp.new
+        else
+          Sidekiq::Logger::Formatters::Pretty.new
+        end
+      end
+    end
+
+    def logger=(logger)
+      if logger.nil?
+        self.logger.level = Logger::FATAL
+        return
+      end
+
+      logger.extend(Sidekiq::LoggingUtils)
+      @logger = logger
+    end
+
+    # INTERNAL USE ONLY
+    def handle_exception(ex, ctx = {})
+      if @options[:error_handlers].size == 0
+        p ["!!!!!", ex]
+      end
+      ctx[:_config] = self
+      @options[:error_handlers].each do |handler|
+        handler.call(ex, ctx)
+      rescue => e
+        l = logger
+        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
+        l.error e
+        l.error e.backtrace.join("\n") unless e.backtrace.nil?
+      end
+    end
+  end
+end
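Taken together, Config replaces the global options hash from Sidekiq 6: the legacy setters still edit the default capsule, while capsule() registers additional processing groups. A hedged sketch of an initializer driving this API; the queue names, capsule name, and MyErrorService call are illustrative (the latter borrowed from the error_handlers comment above):

# config/initializers/sidekiq.rb (sketch)
Sidekiq.configure_server do |config|
  config.concurrency = 10                        # edits the "default" capsule
  config.queues = %w[critical,3 default,2 low,1] # weighted fetch order

  # Register an extra capsule with its own thread pool and queue list
  config.capsule("single-threaded") do |cap|
    cap.concurrency = 1
    cap.queues = %w[serial]
  end

  config.error_handlers << ->(ex, ctx) { MyErrorService.notify(ex, ctx) }
  config.on(:shutdown) { config.logger.info("Shutting down") }
end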
data/lib/sidekiq/deploy.rb
ADDED
@@ -0,0 +1,62 @@
+require "sidekiq/redis_connection"
+require "time"
+
+# This file is designed to be required within the user's
+# deployment script; it should need a bare minimum of dependencies.
+# Usage:
+#
+#   require "sidekiq/deploy"
+#   Sidekiq::Deploy.mark!("Some change")
+#
+# If you do not pass a label, Sidekiq will try to use the latest
+# git commit info.
+#
+
+module Sidekiq
+  class Deploy
+    MARK_TTL = 90 * 24 * 60 * 60 # 90 days
+
+    LABEL_MAKER = -> {
+      `git log -1 --format="%h %s"`.strip
+    }
+
+    def self.mark!(label = nil)
+      label ||= LABEL_MAKER.call
+      Sidekiq::Deploy.new.mark(label: label)
+    end
+
+    def initialize(pool = Sidekiq::RedisConnection.create)
+      @pool = pool
+    end
+
+    def mark(at: Time.now, label: "")
+      # we need to round the timestamp so that we gracefully
+      # handle an very common error in marking deploys:
+      # having every process mark its deploy, leading
+      # to N marks for each deploy. Instead we round the time
+      # to the minute so that multple marks within that minute
+      # will all naturally rollup into one mark per minute.
+      whence = at.utc
+      floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
+      datecode = floor.strftime("%Y%m%d")
+      key = "#{datecode}-marks"
+      stamp = floor.iso8601
+
+      @pool.with do |c|
+        # only allow one deploy mark for a given label for the next minute
+        lock = c.set("deploylock-#{label}", stamp, nx: true, ex: 60)
+        if lock
+          c.multi do |pipe|
+            pipe.hsetnx(key, stamp, label)
+            pipe.expire(key, MARK_TTL)
+          end
+        end
+      end
+    end
+
+    def fetch(date = Time.now.utc.to_date)
+      datecode = date.strftime("%Y%m%d")
+      @pool.with { |c| c.hgetall("#{datecode}-marks") }
+    end
+  end
+end
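A deploy mark is meant to be written once per release from the deployment pipeline, so that tooling (such as the new metrics views) can show when a release went out. A sketch under that assumption; the Rake task name and DEPLOY_LABEL variable are placeholders:

# lib/tasks/deploy.rake (sketch)
require "sidekiq/deploy"

namespace :deploy do
  desc "Record a Sidekiq deploy mark for this release"
  task :mark do
    # With no label, Deploy.mark! falls back to the latest git commit info.
    Sidekiq::Deploy.mark!(ENV["DEPLOY_LABEL"])
  end
end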
data/lib/sidekiq/embedded.rb
ADDED
@@ -0,0 +1,61 @@
+require "sidekiq/component"
+require "sidekiq/launcher"
+require "sidekiq/metrics/tracking"
+
+module Sidekiq
+  class Embedded
+    include Sidekiq::Component
+
+    def initialize(config)
+      @config = config
+    end
+
+    def run
+      housekeeping
+      fire_event(:startup, reverse: false, reraise: true)
+      @launcher = Sidekiq::Launcher.new(@config, embedded: true)
+      @launcher.run
+      sleep 0.1 # pause to give threads time to spin up
+
+      logger.info "Embedded mode running with #{Thread.list.size} threads"
+      logger.debug { Thread.list.map(&:name) }
+    end
+
+    def quiet
+      @launcher&.quiet
+    end
+
+    def stop
+      @launcher&.stop
+    end
+
+    private
+
+    def housekeeping
+      logger.info "Running in #{RUBY_DESCRIPTION}"
+      logger.info Sidekiq::LICENSE
+      logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
+
+      # touch the connection pool so it is created before we
+      # fire startup and start multithreading.
+      info = config.redis_info
+      ver = Gem::Version.new(info["redis_version"])
+      raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")
+
+      maxmemory_policy = info["maxmemory_policy"]
+      if maxmemory_policy != "noeviction"
+        logger.warn <<~EOM
+
+
+          WARNING: Your Redis instance will evict Sidekiq data under heavy load.
+          The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
+          See: https://github.com/mperham/sidekiq/wiki/Using-Redis#memory
+
+        EOM
+      end
+
+      logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
+      logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
+    end
+  end
+end
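Embedded mode lets another Ruby process (a Puma worker, for instance) run a small Sidekiq executor in-process instead of booting a separate sidekiq command. A sketch assuming Sidekiq 7's Sidekiq.configure_embed entry point returns an instance of this class; the Puma hooks, concurrency, and queue name are illustrative only:

# config/puma.rb (sketch)
embedded = nil

on_worker_boot do
  embedded = Sidekiq.configure_embed do |config|
    config.concurrency = 2           # keep the in-process pool small
    config.queues = %w[low_priority]
  end
  embedded.run                       # starts the Launcher inside this Puma worker
end

on_worker_shutdown do
  embedded&.stop                     # gives running jobs a chance to finish cleanly
end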
data/lib/sidekiq/fetch.rb
CHANGED
@@ -1,14 +1,17 @@
 # frozen_string_literal: true
 
 require "sidekiq"
+require "sidekiq/component"
+require "sidekiq/capsule"
 
-module Sidekiq
+module Sidekiq # :nodoc:
   class BasicFetch
+    include Sidekiq::Component
     # We want the fetch operation to timeout every few seconds so the thread
     # can check if the process is shutting down.
     TIMEOUT = 2
 
-    UnitOfWork = Struct.new(:queue, :job) {
+    UnitOfWork = Struct.new(:queue, :job, :config) {
       def acknowledge
         # nothing to do
       end
@@ -18,56 +21,55 @@ module Sidekiq
       end
 
       def requeue
-
+        config.redis do |conn|
          conn.rpush(queue, job)
        end
      end
    }
 
-    def initialize(
-      raise ArgumentError, "missing queue list" unless
-      @
-      @strictly_ordered_queues =
-      @queues =
+    def initialize(cap)
+      raise ArgumentError, "missing queue list" unless cap.queues
+      @config = cap
+      @strictly_ordered_queues = (config.queues.size == config.queues.uniq.size)
+      @queues = config.queues.map { |q| "queue:#{q}" }
       if @strictly_ordered_queues
        @queues.uniq!
-        @queues << TIMEOUT
      end
    end
 
    def retrieve_work
      qs = queues_cmd
      # 4825 Sidekiq Pro with all queues paused will return an
-      # empty set of queues
-      if qs.size <=
+      # empty set of queues
+      if qs.size <= 0
        sleep(TIMEOUT)
        return nil
      end
 
-
-      UnitOfWork.new(
+      queue, job = redis { |conn| conn.blocking_call(false, "brpop", *qs, TIMEOUT) }
+      UnitOfWork.new(queue, job, config) if queue
    end
 
-    def bulk_requeue(inprogress
+    def bulk_requeue(inprogress)
      return if inprogress.empty?
 
-
+      logger.debug { "Re-queueing terminated jobs" }
      jobs_to_requeue = {}
      inprogress.each do |unit_of_work|
        jobs_to_requeue[unit_of_work.queue] ||= []
        jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
      end
 
-
+      redis do |conn|
        conn.pipelined do |pipeline|
          jobs_to_requeue.each do |queue, jobs|
            pipeline.rpush(queue, jobs)
          end
        end
      end
-
+      logger.info("Pushed #{inprogress.size} jobs back to Redis")
    rescue => ex
-
+      logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
    end
 
    # Creating the Redis#brpop command takes into account any
@@ -81,7 +83,6 @@ module Sidekiq
      else
        permute = @queues.shuffle
        permute.uniq!
-        permute << TIMEOUT
        permute
      end
    end