sidekiq 5.2.4 → 7.2.4
- checksums.yaml +4 -4
- data/Changes.md +672 -8
- data/LICENSE.txt +9 -0
- data/README.md +48 -51
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +213 -115
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +623 -352
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +214 -229
- data/lib/sidekiq/client.rb +127 -102
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +287 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/job.rb +374 -0
- data/lib/sidekiq/job_logger.rb +33 -7
- data/lib/sidekiq/job_retry.rb +157 -108
- data/lib/sidekiq/job_util.rb +107 -0
- data/lib/sidekiq/launcher.rb +206 -106
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +43 -46
- data/lib/sidekiq/metrics/query.rb +156 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +140 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +95 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +146 -0
- data/lib/sidekiq/paginator.rb +28 -16
- data/lib/sidekiq/processor.rb +126 -117
- data/lib/sidekiq/rails.rb +52 -38
- data/lib/sidekiq/redis_client_adapter.rb +111 -0
- data/lib/sidekiq/redis_connection.rb +41 -112
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +112 -50
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +91 -90
- data/lib/sidekiq/transaction_aware_client.rb +51 -0
- data/lib/sidekiq/version.rb +3 -1
- data/lib/sidekiq/web/action.rb +20 -11
- data/lib/sidekiq/web/application.rb +202 -80
- data/lib/sidekiq/web/csrf_protection.rb +183 -0
- data/lib/sidekiq/web/helpers.rb +165 -114
- data/lib/sidekiq/web/router.rb +23 -19
- data/lib/sidekiq/web.rb +68 -107
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +92 -182
- data/sidekiq.gemspec +25 -16
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +152 -61
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +182 -0
- data/web/assets/javascripts/dashboard.js +35 -293
- data/web/assets/javascripts/metrics.js +298 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +10 -93
- data/web/assets/stylesheets/application.css +124 -522
- data/web/assets/stylesheets/bootstrap.css +1 -1
- data/web/locales/ar.yml +71 -65
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -53
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +86 -66
- data/web/locales/es.yml +70 -54
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +83 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +68 -63
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +18 -3
- data/web/views/_job_info.erb +21 -4
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +1 -1
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +79 -29
- data/web/views/dashboard.erb +48 -18
- data/web/views/dead.erb +3 -3
- data/web/views/filtering.erb +7 -0
- data/web/views/layout.erb +3 -1
- data/web/views/metrics.erb +91 -0
- data/web/views/metrics_for_job.erb +59 -0
- data/web/views/morgue.erb +14 -15
- data/web/views/queue.erb +33 -24
- data/web/views/queues.erb +19 -5
- data/web/views/retries.erb +16 -17
- data/web/views/retry.erb +3 -3
- data/web/views/scheduled.erb +17 -15
- metadata +71 -72
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -15
- data/.travis.yml +0 -17
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/Appraisals +0 -9
- data/COMM-LICENSE +0 -95
- data/Ent-Changes.md +0 -225
- data/Gemfile +0 -29
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -752
- data/Rakefile +0 -9
- data/bin/sidekiqctl +0 -237
- data/code_of_conduct.md +0 -50
- data/gemfiles/rails_4.gemfile +0 -31
- data/gemfiles/rails_5.gemfile +0 -31
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/delay.rb +0 -42
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -215
data/lib/sidekiq/config.rb ADDED
@@ -0,0 +1,287 @@
require "forwardable"

require "set"
require "sidekiq/redis_connection"

module Sidekiq
  # Sidekiq::Config represents the global configuration for an instance of Sidekiq.
  class Config
    extend Forwardable

    DEFAULTS = {
      labels: Set.new,
      require: ".",
      environment: nil,
      concurrency: 5,
      timeout: 25,
      poll_interval_average: nil,
      average_scheduled_poll_interval: 5,
      on_complex_arguments: :raise,
      error_handlers: [],
      death_handlers: [],
      lifecycle_events: {
        startup: [],
        quiet: [],
        shutdown: [],
        # triggers when we fire the first heartbeat on startup OR repairing a network partition
        heartbeat: [],
        # triggers on EVERY heartbeat call, every 10 seconds
        beat: []
      },
      dead_max_jobs: 10_000,
      dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
      reloader: proc { |&block| block.call },
      backtrace_cleaner: ->(backtrace) { backtrace }
    }

    ERROR_HANDLER = ->(ex, ctx, cfg = Sidekiq.default_configuration) {
      l = cfg.logger
      l.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
      l.warn("#{ex.class.name}: #{ex.message}")
      unless ex.backtrace.nil?
        backtrace = cfg[:backtrace_cleaner].call(ex.backtrace)
        l.warn(backtrace.join("\n"))
      end
    }

    def initialize(options = {})
      @options = DEFAULTS.merge(options)
      @options[:error_handlers] << ERROR_HANDLER if @options[:error_handlers].empty?
      @directory = {}
      @redis_config = {}
      @capsules = {}
    end

    def_delegators :@options, :[], :[]=, :fetch, :key?, :has_key?, :merge!
    attr_reader :capsules

    def to_json(*)
      Sidekiq.dump_json(@options)
    end

    # LEGACY: edits the default capsule
    # config.concurrency = 5
    def concurrency=(val)
      default_capsule.concurrency = Integer(val)
    end

    def concurrency
      default_capsule.concurrency
    end

    def total_concurrency
      capsules.each_value.sum(&:concurrency)
    end

    # Edit the default capsule.
    # config.queues = %w( high default low ) # strict
    # config.queues = %w( high,3 default,2 low,1 ) # weighted
    # config.queues = %w( feature1,1 feature2,1 feature3,1 ) # random
    #
    # With weighted priority, queue will be checked first (weight / total) of the time.
    # high will be checked first (3/6) or 50% of the time.
    # I'd recommend setting weights between 1-10. Weights in the hundreds or thousands
    # are ridiculous and unnecessarily expensive. You can get random queue ordering
    # by explicitly setting all weights to 1.
    def queues=(val)
      default_capsule.queues = val
    end

    def queues
      default_capsule.queues
    end

    def client_middleware
      @client_chain ||= Sidekiq::Middleware::Chain.new(self)
      yield @client_chain if block_given?
      @client_chain
    end

    def server_middleware
      @server_chain ||= Sidekiq::Middleware::Chain.new(self)
      yield @server_chain if block_given?
      @server_chain
    end

    def default_capsule(&block)
      capsule("default", &block)
    end

    # register a new queue processing subsystem
    def capsule(name)
      nm = name.to_s
      cap = @capsules.fetch(nm) do
        cap = Sidekiq::Capsule.new(nm, self)
        @capsules[nm] = cap
      end
      yield cap if block_given?
      cap
    end

    # All capsules must use the same Redis configuration
    def redis=(hash)
      @redis_config = @redis_config.merge(hash)
    end

    def redis_pool
      Thread.current[:sidekiq_redis_pool] || Thread.current[:sidekiq_capsule]&.redis_pool || local_redis_pool
    end

    private def local_redis_pool
      # this is our internal client/housekeeping pool. each capsule has its
      # own pool for executing threads.
      @redis ||= new_redis_pool(10, "internal")
    end

    def new_redis_pool(size, name = "unset")
      # connection pool is lazy, it will not create connections unless you actually need them
      # so don't be skimpy!
      RedisConnection.create({size: size, logger: logger, pool_name: name}.merge(@redis_config))
    end

    def redis_info
      redis do |conn|
        conn.call("INFO") { |i| i.lines(chomp: true).map { |l| l.split(":", 2) }.select { |l| l.size == 2 }.to_h }
      rescue RedisClientAdapter::CommandError => ex
        # 2850 return fake version when INFO command has (probably) been renamed
        raise unless /unknown command/.match?(ex.message)
        {
          "redis_version" => "9.9.9",
          "uptime_in_days" => "9999",
          "connected_clients" => "9999",
          "used_memory_human" => "9P",
          "used_memory_peak_human" => "9P"
        }.freeze
      end
    end

    def redis
      raise ArgumentError, "requires a block" unless block_given?
      redis_pool.with do |conn|
        retryable = true
        begin
          yield conn
        rescue RedisClientAdapter::BaseError => ex
          # 2550 Failover can cause the server to become a replica, need
          # to disconnect and reopen the socket to get back to the primary.
          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
          # 4985 Use the same logic when a blocking command is force-unblocked
          # The same retry logic is also used in client.rb
          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
            conn.close
            retryable = false
            retry
          end
          raise
        end
      end
    end

    # register global singletons which can be accessed elsewhere
    def register(name, instance)
      @directory[name] = instance
    end

    # find a singleton
    def lookup(name, default_class = nil)
      # JNDI is just a fancy name for a hash lookup
      @directory.fetch(name) do |key|
        return nil unless default_class
        @directory[key] = default_class.new(self)
      end
    end

    ##
    # Death handlers are called when all retries for a job have been exhausted and
    # the job dies. It's the notification to your application
    # that this job will not succeed without manual intervention.
    #
    #   Sidekiq.configure_server do |config|
    #     config.death_handlers << ->(job, ex) do
    #     end
    #   end
    def death_handlers
      @options[:death_handlers]
    end

    # How frequently Redis should be checked by a random Sidekiq process for
    # scheduled and retriable jobs. Each individual process will take turns by
    # waiting some multiple of this value.
    #
    # See sidekiq/scheduled.rb for an in-depth explanation of this value
    def average_scheduled_poll_interval=(interval)
      @options[:average_scheduled_poll_interval] = interval
    end

    # Register a proc to handle any error which occurs within the Sidekiq process.
    #
    #   Sidekiq.configure_server do |config|
    #     config.error_handlers << proc {|ex,ctx_hash| MyErrorService.notify(ex, ctx_hash) }
    #   end
    #
    # The default error handler logs errors to @logger.
    def error_handlers
      @options[:error_handlers]
    end

    # Register a block to run at a point in the Sidekiq lifecycle.
    # :startup, :quiet or :shutdown are valid events.
    #
    #   Sidekiq.configure_server do |config|
    #     config.on(:shutdown) do
    #       puts "Goodbye cruel world!"
    #     end
    #   end
    def on(event, &block)
      raise ArgumentError, "Symbols only please: #{event}" unless event.is_a?(Symbol)
      raise ArgumentError, "Invalid event name: #{event}" unless @options[:lifecycle_events].key?(event)
      @options[:lifecycle_events][event] << block
    end

    def logger
      @logger ||= Sidekiq::Logger.new($stdout, level: :info).tap do |log|
        log.level = Logger::INFO
        log.formatter = if ENV["DYNO"]
          Sidekiq::Logger::Formatters::WithoutTimestamp.new
        else
          Sidekiq::Logger::Formatters::Pretty.new
        end
      end
    end

    def logger=(logger)
      if logger.nil?
        self.logger.level = Logger::FATAL
        return
      end

      @logger = logger
    end

    private def parameter_size(handler)
      target = handler.is_a?(Proc) ? handler : handler.method(:call)
      target.parameters.size
    end

    # INTERNAL USE ONLY
    def handle_exception(ex, ctx = {})
      if @options[:error_handlers].size == 0
        p ["!!!!!", ex]
      end
      @options[:error_handlers].each do |handler|
        if parameter_size(handler) == 2
          # TODO Remove in 8.0
          logger.info { "DEPRECATION: Sidekiq exception handlers now take three arguments, see #{handler}" }
          handler.call(ex, {_config: self}.merge(ctx))
        else
          handler.call(ex, ctx, self)
        end
      rescue Exception => e
        l = logger
        l.error "!!! ERROR HANDLER THREW AN ERROR !!!"
        l.error e
        l.error e.backtrace.join("\n") unless e.backtrace.nil?
      end
    end
  end
end
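The Config/Capsule split above means the familiar top-level settings (concurrency, queues, redis) now edit the "default" capsule, while capsule() registers additional, independently sized queue processors. A minimal sketch of how this surface is typically driven from an initializer; it assumes the standard Sidekiq.configure_server entry point, and the capsule name, queue names and MyErrorService (borrowed from the inline docs above) are placeholders:

# config/initializers/sidekiq.rb (illustrative)
Sidekiq.configure_server do |config|
  # Legacy accessors edit the "default" capsule:
  config.concurrency = 10
  config.queues = %w[critical,3 default,2 low,1] # weighted fetch

  # All capsules share one Redis configuration:
  config.redis = {url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0")}

  # Register an extra capsule: its queues get their own thread pool.
  config.capsule("serial") do |cap|
    cap.concurrency = 1
    cap.queues = %w[serial]
  end

  # Error handlers receive the config as a third argument in 7.x:
  config.error_handlers << ->(ex, ctx, cfg) { MyErrorService.notify(ex, ctx) }
  config.death_handlers << ->(job, ex) { MyErrorService.notify(ex, job) }

  config.on(:shutdown) { Sidekiq.logger.info("Shutting down") }
end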
data/lib/sidekiq/deploy.rb ADDED
@@ -0,0 +1,62 @@
require "sidekiq/redis_connection"
require "time"

# This file is designed to be required within the user's
# deployment script; it should need a bare minimum of dependencies.
# Usage:
#
#   require "sidekiq/deploy"
#   Sidekiq::Deploy.mark!("Some change")
#
# If you do not pass a label, Sidekiq will try to use the latest
# git commit info.
#

module Sidekiq
  class Deploy
    MARK_TTL = 90 * 24 * 60 * 60 # 90 days

    LABEL_MAKER = -> {
      `git log -1 --format="%h %s"`.strip
    }

    def self.mark!(label = nil)
      Sidekiq::Deploy.new.mark!(label: label)
    end

    def initialize(pool = Sidekiq::RedisConnection.create)
      @pool = pool
    end

    def mark!(at: Time.now, label: nil)
      label ||= LABEL_MAKER.call
      # we need to round the timestamp so that we gracefully
      # handle an very common error in marking deploys:
      # having every process mark its deploy, leading
      # to N marks for each deploy. Instead we round the time
      # to the minute so that multiple marks within that minute
      # will all naturally rollup into one mark per minute.
      whence = at.utc
      floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
      datecode = floor.strftime("%Y%m%d")
      key = "#{datecode}-marks"
      stamp = floor.iso8601

      @pool.with do |c|
        # only allow one deploy mark for a given label for the next minute
        lock = c.set("deploylock-#{label}", stamp, "nx", "ex", "60")
        if lock
          c.multi do |pipe|
            pipe.hsetnx(key, stamp, label)
            pipe.expire(key, MARK_TTL)
          end
        end
      end
    end

    def fetch(date = Time.now.utc.to_date)
      datecode = date.strftime("%Y%m%d")
      @pool.with { |c| c.hgetall("#{datecode}-marks") }
    end
  end
end
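Per the comments at the top of the file, this class is meant to be driven from a deploy script with minimal dependencies. A short sketch of that usage; the GIT_SHA environment variable is just an example label source, and the default connection pool picks up REDIS_URL:

# mark_deploy.rb, run from a deploy hook (illustrative)
require "date"
require "sidekiq/deploy"

# Record a mark; with no label, the latest git commit ("%h %s") is used.
Sidekiq::Deploy.mark!("Deploy #{ENV.fetch("GIT_SHA", "unknown")}")

# Marks recorded for a given day come back as {iso8601 timestamp => label}.
puts Sidekiq::Deploy.new.fetch(Time.now.utc.to_date)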
data/lib/sidekiq/embedded.rb ADDED
@@ -0,0 +1,61 @@
require "sidekiq/component"
require "sidekiq/launcher"
require "sidekiq/metrics/tracking"

module Sidekiq
  class Embedded
    include Sidekiq::Component

    def initialize(config)
      @config = config
    end

    def run
      housekeeping
      fire_event(:startup, reverse: false, reraise: true)
      @launcher = Sidekiq::Launcher.new(@config, embedded: true)
      @launcher.run
      sleep 0.2 # pause to give threads time to spin up

      logger.info "Sidekiq running embedded, total process thread count: #{Thread.list.size}"
      logger.debug { Thread.list.map(&:name) }
    end

    def quiet
      @launcher&.quiet
    end

    def stop
      @launcher&.stop
    end

    private

    def housekeeping
      logger.info "Running in #{RUBY_DESCRIPTION}"
      logger.info Sidekiq::LICENSE
      logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)

      # touch the connection pool so it is created before we
      # fire startup and start multithreading.
      info = config.redis_info
      ver = Gem::Version.new(info["redis_version"])
      raise "You are connecting to Redis #{ver}, Sidekiq requires Redis 6.2.0 or greater" if ver < Gem::Version.new("6.2.0")

      maxmemory_policy = info["maxmemory_policy"]
      if maxmemory_policy != "noeviction"
        logger.warn <<~EOM


          WARNING: Your Redis instance will evict Sidekiq data under heavy load.
          The 'noeviction' maxmemory policy is recommended (current policy: '#{maxmemory_policy}').
          See: https://github.com/sidekiq/sidekiq/wiki/Using-Redis#memory

        EOM
      end

      logger.debug { "Client Middleware: #{@config.default_capsule.client_middleware.map(&:klass).join(", ")}" }
      logger.debug { "Server Middleware: #{@config.default_capsule.server_middleware.map(&:klass).join(", ")}" }
    end
  end
end
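Sidekiq::Embedded runs the launcher inside an existing process instead of the sidekiq binary. A rough sketch of the embedding flow, assuming the Sidekiq.configure_embed entry point that ships alongside this class in 7.x; the queue names and concurrency are illustrative, and concurrency should stay small because these threads share the host process:

require "sidekiq"

embed = Sidekiq.configure_embed do |config|
  config.queues = %w[critical default]
  config.concurrency = 2
end

embed.run    # housekeeping + launcher start, returns immediately
# ... the host process (a web server, a cron runner, etc.) does its own work ...
embed.quiet  # stop picking up new jobs
embed.stop   # shut down in-flight work, honoring the configured timeout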
data/lib/sidekiq/fetch.rb CHANGED
@@ -1,81 +1,88 @@
 # frozen_string_literal: true
-require 'sidekiq'
 
-
+require "sidekiq"
+require "sidekiq/component"
+require "sidekiq/capsule"
+
+module Sidekiq # :nodoc:
   class BasicFetch
+    include Sidekiq::Component
     # We want the fetch operation to timeout every few seconds so the thread
     # can check if the process is shutting down.
     TIMEOUT = 2
 
-    UnitOfWork = Struct.new(:queue, :job)
+    UnitOfWork = Struct.new(:queue, :job, :config) {
       def acknowledge
         # nothing to do
       end
 
      def queue_name
-        queue.
+        queue.delete_prefix("queue:")
       end
 
      def requeue
-
-          conn.rpush(
+        config.redis do |conn|
+          conn.rpush(queue, job)
        end
      end
-
+    }
 
-    def initialize(
-
-      @
-
-
-
-      end
+    def initialize(cap)
+      raise ArgumentError, "missing queue list" unless cap.queues
+      @config = cap
+      @strictly_ordered_queues = cap.mode == :strict
+      @queues = config.queues.map { |q| "queue:#{q}" }
+      @queues.uniq! if @strictly_ordered_queues
    end
 
    def retrieve_work
-
-
-
-
-
-
-    # data from the first queue that has pending elements. We
-    # recreate the queue command each time we invoke Redis#brpop
-    # to honor weights and avoid queue starvation.
-    def queues_cmd
-      if @strictly_ordered_queues
-        @queues
-      else
-        queues = @queues.shuffle.uniq
-        queues << TIMEOUT
-        queues
+      qs = queues_cmd
+      # 4825 Sidekiq Pro with all queues paused will return an
+      # empty set of queues
+      if qs.size <= 0
+        sleep(TIMEOUT)
+        return nil
      end
-    end
 
+      queue, job = redis { |conn| conn.blocking_call(conn.read_timeout + TIMEOUT, "brpop", *qs, TIMEOUT) }
+      UnitOfWork.new(queue, job, config) if queue
+    end
 
-
-    # an instance method will make it async to the Fetcher actor
-    def self.bulk_requeue(inprogress, options)
+    def bulk_requeue(inprogress)
      return if inprogress.empty?
 
-
+      logger.debug { "Re-queueing terminated jobs" }
      jobs_to_requeue = {}
      inprogress.each do |unit_of_work|
-        jobs_to_requeue[unit_of_work.
-        jobs_to_requeue[unit_of_work.
+        jobs_to_requeue[unit_of_work.queue] ||= []
+        jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
      end
 
-
-        conn.pipelined do
+      redis do |conn|
+        conn.pipelined do |pipeline|
          jobs_to_requeue.each do |queue, jobs|
-
+            pipeline.rpush(queue, jobs)
          end
        end
      end
-
+      logger.info("Pushed #{inprogress.size} jobs back to Redis")
    rescue => ex
-
+      logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
    end
 
+    # Creating the Redis#brpop command takes into account any
+    # configured queue weights. By default Redis#brpop returns
+    # data from the first queue that has pending elements. We
+    # recreate the queue command each time we invoke Redis#brpop
+    # to honor weights and avoid queue starvation.
+    def queues_cmd
+      if @strictly_ordered_queues
+        @queues
+      else
+        permute = @queues.shuffle
+        permute.uniq!
+        permute
+      end
+    end
  end
 end
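The weighted behaviour documented in Config#queues= above comes down to queues_cmd: the shuffled, de-duplicated queue list becomes the BRPOP key order, so queues that appear more often in the list are more likely to be checked first. A small standalone sketch of that bias; the queue names are examples, and the repeat-by-weight step mirrors what Capsule#queues= is assumed to do with "name,N" entries:

# Illustration only: how a weighted queue list biases BRPOP ordering.
weighted = %w[critical,3 default,2 low,1]

# Assumed weight expansion (each "name,N" repeated N times):
expanded = weighted.flat_map do |entry|
  name, weight = entry.split(",")
  ["queue:#{name}"] * [weight.to_i, 1].max
end
# => ["queue:critical", "queue:critical", "queue:critical",
#     "queue:default", "queue:default", "queue:low"]

# queues_cmd then shuffles and de-duplicates on every fetch, so higher
# weights translate into a higher chance of being first in the key list.
brpop_keys = expanded.shuffle
brpop_keys.uniq!
p brpop_keys # e.g. ["queue:critical", "queue:default", "queue:low"]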