sidekiq 6.4.2 → 6.5.12
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Changes.md +89 -0
- data/bin/sidekiqload +17 -5
- data/lib/sidekiq/api.rb +196 -45
- data/lib/sidekiq/cli.rb +46 -32
- data/lib/sidekiq/client.rb +6 -6
- data/lib/sidekiq/component.rb +65 -0
- data/lib/sidekiq/delay.rb +1 -1
- data/lib/sidekiq/fetch.rb +18 -16
- data/lib/sidekiq/job_retry.rb +60 -39
- data/lib/sidekiq/job_util.rb +7 -3
- data/lib/sidekiq/launcher.rb +24 -21
- data/lib/sidekiq/logger.rb +1 -1
- data/lib/sidekiq/manager.rb +23 -20
- data/lib/sidekiq/metrics/deploy.rb +47 -0
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +94 -0
- data/lib/sidekiq/metrics/tracking.rb +134 -0
- data/lib/sidekiq/middleware/chain.rb +82 -38
- data/lib/sidekiq/middleware/current_attributes.rb +18 -12
- data/lib/sidekiq/middleware/i18n.rb +2 -0
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +1 -1
- data/lib/sidekiq/paginator.rb +11 -3
- data/lib/sidekiq/processor.rb +21 -15
- data/lib/sidekiq/rails.rb +12 -13
- data/lib/sidekiq/redis_client_adapter.rb +154 -0
- data/lib/sidekiq/redis_connection.rb +78 -47
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +53 -24
- data/lib/sidekiq/testing.rb +1 -1
- data/lib/sidekiq/transaction_aware_client.rb +45 -0
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +21 -5
- data/lib/sidekiq/web/helpers.rb +18 -5
- data/lib/sidekiq/web.rb +5 -1
- data/lib/sidekiq/worker.rb +8 -4
- data/lib/sidekiq.rb +87 -18
- data/sidekiq.gemspec +2 -2
- data/web/assets/javascripts/application.js +2 -1
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard.js +0 -17
- data/web/assets/javascripts/graph.js +16 -0
- data/web/assets/javascripts/metrics.js +262 -0
- data/web/assets/stylesheets/application.css +44 -1
- data/web/locales/el.yml +43 -19
- data/web/locales/en.yml +7 -0
- data/web/locales/ja.yml +7 -0
- data/web/locales/pt-br.yml +27 -9
- data/web/locales/zh-cn.yml +36 -11
- data/web/locales/zh-tw.yml +32 -7
- data/web/views/_nav.erb +1 -1
- data/web/views/busy.erb +7 -2
- data/web/views/dashboard.erb +1 -0
- data/web/views/metrics.erb +69 -0
- data/web/views/metrics_for_job.erb +87 -0
- data/web/views/queue.erb +5 -1
- metadata +34 -9
- data/lib/sidekiq/exception_handler.rb +0 -27
- data/lib/sidekiq/util.rb +0 -108
data/lib/sidekiq/cli.rb
CHANGED
@@ -9,18 +9,34 @@ require "erb"
 require "fileutils"
 
 require "sidekiq"
+require "sidekiq/component"
 require "sidekiq/launcher"
-require "sidekiq/util"
 
-module
+# module ScoutApm
+#   VERSION = "5.3.1"
+# end
+fail <<~EOM if defined?(ScoutApm::VERSION) && ScoutApm::VERSION < "5.2.0"
+
+
+  scout_apm v#{ScoutApm::VERSION} is unsafe with Sidekiq 6.5. Please run `bundle up scout_apm` to upgrade to 5.2.0 or greater.
+
+
+EOM
+
+module Sidekiq # :nodoc:
   class CLI
-    include
+    include Sidekiq::Component
     include Singleton unless $TESTING
 
     attr_accessor :launcher
     attr_accessor :environment
+    attr_accessor :config
 
     def parse(args = ARGV.dup)
+      @config = Sidekiq
+      @config[:error_handlers].clear
+      @config[:error_handlers] << @config.method(:default_error_handler)
+
       setup_options(args)
       initialize_logger
       validate!
@@ -36,7 +52,7 @@ module Sidekiq
     def run(boot_app: true)
       boot_application if boot_app
 
-      if environment == "development" && $stdout.tty? &&
+      if environment == "development" && $stdout.tty? && @config.log_formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
         print_banner
       end
       logger.info "Booted Rails #{::Rails.version} application in #{environment} environment" if rails_app?
@@ -67,7 +83,7 @@ module Sidekiq
 
      # touch the connection pool so it is created before we
      # fire startup and start multithreading.
-      info =
+      info = @config.redis_info
      ver = info["redis_version"]
      raise "You are connecting to Redis v#{ver}, Sidekiq requires Redis v4.0.0 or greater" if ver < "4"
 
@@ -85,22 +101,22 @@ module Sidekiq
 
      # Since the user can pass us a connection pool explicitly in the initializer, we
      # need to verify the size is large enough or else Sidekiq's performance is dramatically slowed.
-      cursize =
-      needed =
+      cursize = @config.redis_pool.size
+      needed = @config[:concurrency] + 2
      raise "Your pool of #{cursize} Redis connections is too small, please increase the size to at least #{needed}" if cursize < needed
 
      # cache process identity
-
+      @config[:identity] = identity
 
      # Touch middleware so it isn't lazy loaded by multiple threads, #3043
-
+      @config.server_middleware
 
      # Before this point, the process is initializing with just the main thread.
      # Starting here the process will now have multiple threads running.
      fire_event(:startup, reverse: false, reraise: true)
 
-      logger.debug { "Client Middleware: #{
-      logger.debug { "Server Middleware: #{
+      logger.debug { "Client Middleware: #{@config.client_middleware.map(&:klass).join(", ")}" }
+      logger.debug { "Server Middleware: #{@config.server_middleware.map(&:klass).join(", ")}" }
 
      launch(self_read)
    end
@@ -110,7 +126,7 @@ module Sidekiq
        logger.info "Starting processing, hit Ctrl-C to stop"
      end
 
-      @launcher = Sidekiq::Launcher.new(
+      @launcher = Sidekiq::Launcher.new(@config)
 
      begin
        launcher.run
@@ -173,25 +189,25 @@ module Sidekiq
      # Heroku sends TERM and then waits 30 seconds for process to exit.
      "TERM" => ->(cli) { raise Interrupt },
      "TSTP" => ->(cli) {
-
+        cli.logger.info "Received TSTP, no longer accepting new work"
        cli.launcher.quiet
      },
      "TTIN" => ->(cli) {
        Thread.list.each do |thread|
-
+          cli.logger.warn "Thread TID-#{(thread.object_id ^ ::Process.pid).to_s(36)} #{thread.name}"
          if thread.backtrace
-
+            cli.logger.warn thread.backtrace.join("\n")
          else
-
+            cli.logger.warn "<no backtrace available>"
          end
        end
      }
    }
-    UNHANDLED_SIGNAL_HANDLER = ->(cli) {
+    UNHANDLED_SIGNAL_HANDLER = ->(cli) { cli.logger.info "No signal handler registered, ignoring" }
    SIGNAL_HANDLERS.default = UNHANDLED_SIGNAL_HANDLER
 
    def handle_signal(sig)
-
+      logger.debug "Got #{sig} signal"
      SIGNAL_HANDLERS[sig].call(self)
    end
 
@@ -209,6 +225,7 @@ module Sidekiq
      # Both Sinatra 2.0+ and Sidekiq support this term.
      # RAILS_ENV and RACK_ENV are there for legacy support.
      @environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
+      config[:environment] = @environment
    end
 
    def symbolize_keys_deep!(hash)
@@ -237,7 +254,7 @@ module Sidekiq
      config_dir = if File.directory?(opts[:require].to_s)
        File.join(opts[:require], "config")
      else
-        File.join(
+        File.join(@config[:require], "config")
      end
 
      %w[sidekiq.yml sidekiq.yml.erb].each do |config_file|
@@ -254,27 +271,23 @@ module Sidekiq
      opts[:concurrency] = Integer(ENV["RAILS_MAX_THREADS"]) if opts[:concurrency].nil? && ENV["RAILS_MAX_THREADS"]
 
      # merge with defaults
-
-    end
-
-    def options
-      Sidekiq.options
+      @config.merge!(opts)
    end
 
    def boot_application
      ENV["RACK_ENV"] = ENV["RAILS_ENV"] = environment
 
-      if File.directory?(
+      if File.directory?(@config[:require])
        require "rails"
        if ::Rails::VERSION::MAJOR < 5
          raise "Sidekiq no longer supports this version of Rails"
        else
          require "sidekiq/rails"
-          require File.expand_path("#{
+          require File.expand_path("#{@config[:require]}/config/environment.rb")
        end
-
+        @config[:tag] ||= default_tag
      else
-        require
+        require @config[:require]
      end
    end
 
@@ -291,8 +304,8 @@ module Sidekiq
    end
 
    def validate!
-      if !File.exist?(
-        (File.directory?(
+      if !File.exist?(@config[:require]) ||
+          (File.directory?(@config[:require]) && !File.exist?("#{@config[:require]}/config/application.rb"))
        logger.info "=================================================================="
        logger.info "  Please point Sidekiq to a Rails application or a Ruby file"
        logger.info "  to load your job classes with -r [DIR|FILE]."
@@ -302,7 +315,7 @@ module Sidekiq
      end
 
      [:concurrency, :timeout].each do |opt|
-        raise ArgumentError, "#{opt}: #{
+        raise ArgumentError, "#{opt}: #{@config[opt]} is not a valid value" if @config[opt].to_i <= 0
      end
    end
 
@@ -376,7 +389,7 @@ module Sidekiq
    end
 
    def initialize_logger
-
+      @config.logger.level = ::Logger::DEBUG if @config[:verbose]
    end
 
    def parse_config(path)
@@ -425,3 +438,4 @@ module Sidekiq
 end
 
 require "sidekiq/systemd"
+require "sidekiq/metrics/tracking" if ENV["SIDEKIQ_METRICS_BETA"]
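As the diff shows, the CLI now routes all of its options through `@config` (in 6.5 the Sidekiq module itself acts as the config object), and the new metrics tracking code is only required when the SIDEKIQ_METRICS_BETA environment variable is set. A minimal sketch of touching the same config from an application initializer; the key value and log level shown are illustrative, not recommendations:

    # config/initializers/sidekiq.rb (illustrative values)
    Sidekiq.configure_server do |config|
      config[:dead_max_jobs] = 50_000      # bracket access mirrors @config[...] used by the CLI above
      config.logger.level = Logger::INFO
    end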
data/lib/sidekiq/client.rb
CHANGED
@@ -71,7 +71,7 @@ module Sidekiq
    #
    def push(item)
      normed = normalize_item(item)
-      payload = middleware.invoke(
+      payload = middleware.invoke(item["class"], normed, normed["queue"], @redis_pool) do
        normed
      end
      if payload
@@ -110,7 +110,7 @@ module Sidekiq
      payloads = args.map.with_index { |job_args, index|
        copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
        copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
-        result = middleware.invoke(
+        result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
          verify_json(copy)
          copy
        end
@@ -176,7 +176,7 @@ module Sidekiq
      def enqueue_to_in(queue, interval, klass, *args)
        int = interval.to_f
        now = Time.now.to_f
-        ts = (int < 1_000_000_000 ? now + int : int)
+        ts = ((int < 1_000_000_000) ? now + int : int)
 
        item = {"class" => klass, "args" => args, "at" => ts, "queue" => queue}
        item.delete("at") if ts <= now
@@ -201,7 +201,7 @@ module Sidekiq
        conn.pipelined do |pipeline|
          atomic_push(pipeline, payloads)
        end
-      rescue
+      rescue RedisConnection.adapter::BaseError => ex
      # 2550 Failover can cause the server to become a replica, need
      # to disconnect and reopen the socket to get back to the primary.
      # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
@@ -220,7 +220,7 @@ module Sidekiq
 
    def atomic_push(conn, payloads)
      if payloads.first.key?("at")
-        conn.zadd("schedule", payloads.
+        conn.zadd("schedule", payloads.flat_map { |hash|
          at = hash.delete("at").to_s
          [at, Sidekiq.dump_json(hash)]
        })
@@ -231,7 +231,7 @@ module Sidekiq
          entry["enqueued_at"] = now
          Sidekiq.dump_json(entry)
        }
-        conn.sadd("queues", queue)
+        conn.sadd("queues", [queue])
        conn.lpush("queue:#{queue}", to_push)
      end
    end
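The push_bulk path touched above gives each job its own jid and accepts "at" either as a single timestamp or as one timestamp per job. A hedged usage sketch; "HardJob" is a placeholder class name, not something defined in this gem:

    # Illustrative only
    Sidekiq::Client.push_bulk(
      "class" => "HardJob",
      "args" => [[1], [2], [3]],
      "at" => [Time.now.to_f + 60, Time.now.to_f + 120, Time.now.to_f + 180]
    )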
data/lib/sidekiq/component.rb
ADDED
@@ -0,0 +1,65 @@
+module Sidekiq
+  ##
+  # Sidekiq::Component assumes a config instance is available at @config
+  module Component # :nodoc:
+    attr_reader :config
+
+    def watchdog(last_words)
+      yield
+    rescue Exception => ex
+      handle_exception(ex, {context: last_words})
+      raise ex
+    end
+
+    def safe_thread(name, &block)
+      Thread.new do
+        Thread.current.name = name
+        watchdog(name, &block)
+      end
+    end
+
+    def logger
+      config.logger
+    end
+
+    def redis(&block)
+      config.redis(&block)
+    end
+
+    def tid
+      Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+    end
+
+    def hostname
+      ENV["DYNO"] || Socket.gethostname
+    end
+
+    def process_nonce
+      @@process_nonce ||= SecureRandom.hex(6)
+    end
+
+    def identity
+      @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}"
+    end
+
+    def handle_exception(ex, ctx = {})
+      config.handle_exception(ex, ctx)
+    end
+
+    def fire_event(event, options = {})
+      oneshot = options.fetch(:oneshot, true)
+      reverse = options[:reverse]
+      reraise = options[:reraise]
+
+      arr = config[:lifecycle_events][event]
+      arr.reverse! if reverse
+      arr.each do |block|
+        block.call
+      rescue => ex
+        handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
+        raise ex if reraise
+      end
+      arr.clear if oneshot # once we've fired an event, we never fire it again
+    end
+  end
+end
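Sidekiq::Component replaces the old Util/ExceptionHandler modules: every helper (logger, redis, identity, fire_event, safe_thread) is resolved through whatever object is stored in @config. A minimal sketch of the mix-in contract; DeadSetTrimmer is a hypothetical class written for illustration, not part of Sidekiq, and it assumes the Sidekiq module itself is passed as the config, as the CLI does in 6.5:

    require "sidekiq"
    require "sidekiq/component"

    # Hypothetical helper showing the Component contract.
    class DeadSetTrimmer
      include Sidekiq::Component

      def initialize(config)
        @config = config                   # in 6.5 the Sidekiq module itself acts as the config
      end

      def call
        safe_thread("trim-dead") do        # named thread, wrapped in watchdog/handle_exception
          removed = redis { |c| c.zremrangebyscore("dead", "-inf", Time.now.to_f - 86_400) }
          logger.info "trimmed #{removed} dead jobs (tid #{tid})"
        end.join
      end
    end

    DeadSetTrimmer.new(Sidekiq).call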
data/lib/sidekiq/delay.rb
CHANGED
data/lib/sidekiq/fetch.rb
CHANGED
@@ -1,14 +1,16 @@
 # frozen_string_literal: true
 
 require "sidekiq"
+require "sidekiq/component"
 
-module Sidekiq
+module Sidekiq # :nodoc:
   class BasicFetch
+    include Sidekiq::Component
    # We want the fetch operation to timeout every few seconds so the thread
    # can check if the process is shutting down.
    TIMEOUT = 2
 
-    UnitOfWork = Struct.new(:queue, :job) {
+    UnitOfWork = Struct.new(:queue, :job, :config) {
      def acknowledge
        # nothing to do
      end
@@ -18,20 +20,20 @@ module Sidekiq
      end
 
      def requeue
-
+        config.redis do |conn|
          conn.rpush(queue, job)
        end
      end
    }
 
-    def initialize(
-      raise ArgumentError, "missing queue list" unless
-      @
-      @strictly_ordered_queues = !!@
-      @queues = @
+    def initialize(config)
+      raise ArgumentError, "missing queue list" unless config[:queues]
+      @config = config
+      @strictly_ordered_queues = !!@config[:strict]
+      @queues = @config[:queues].map { |q| "queue:#{q}" }
      if @strictly_ordered_queues
        @queues.uniq!
-        @queues << TIMEOUT
+        @queues << {timeout: TIMEOUT}
      end
    end
 
@@ -44,30 +46,30 @@ module Sidekiq
        return nil
      end
 
-
-      UnitOfWork.new(
+      queue, job = redis { |conn| conn.brpop(*qs) }
+      UnitOfWork.new(queue, job, config) if queue
    end
 
    def bulk_requeue(inprogress, options)
      return if inprogress.empty?
 
-
+      logger.debug { "Re-queueing terminated jobs" }
      jobs_to_requeue = {}
      inprogress.each do |unit_of_work|
        jobs_to_requeue[unit_of_work.queue] ||= []
        jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
      end
 
-
+      redis do |conn|
        conn.pipelined do |pipeline|
          jobs_to_requeue.each do |queue, jobs|
            pipeline.rpush(queue, jobs)
          end
        end
      end
-
+      logger.info("Pushed #{inprogress.size} jobs back to Redis")
    rescue => ex
-
+      logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
    end
 
    # Creating the Redis#brpop command takes into account any
@@ -81,7 +83,7 @@ module Sidekiq
      else
        permute = @queues.shuffle
        permute.uniq!
-        permute << TIMEOUT
+        permute << {timeout: TIMEOUT}
        permute
      end
    end
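The bare TIMEOUT integer appended to the queue list is replaced with a {timeout: TIMEOUT} hash, so the blocking timeout reaches the Redis client as a keyword argument rather than a trailing positional one. Roughly, the call retrieve_work ends up issuing has this shape; the queue names are illustrative:

    Sidekiq.redis do |conn|
      conn.brpop("queue:critical", "queue:default", timeout: 2)   # shape of BasicFetch#retrieve_work's call
    end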
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -1,10 +1,8 @@
 # frozen_string_literal: true
 
-require "sidekiq/scheduled"
-require "sidekiq/api"
-
 require "zlib"
 require "base64"
+require "sidekiq/component"
 
 module Sidekiq
   ##
@@ -66,12 +64,13 @@ module Sidekiq
 
    class Skip < Handled; end
 
-    include Sidekiq::
+    include Sidekiq::Component
 
    DEFAULT_MAX_RETRY_ATTEMPTS = 25
 
-    def initialize(options
-      @
+    def initialize(options)
+      @config = options
+      @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
    end
 
    # The global retry handler requires only the barest of data.
@@ -90,7 +89,7 @@ module Sidekiq
 
      msg = Sidekiq.load_json(jobstr)
      if msg["retry"]
-
+        process_retry(nil, msg, queue, e)
      else
        Sidekiq.death_handlers.each do |handler|
          handler.call(msg, e)
@@ -127,7 +126,7 @@ module Sidekiq
      end
 
      raise e unless msg["retry"]
-
+      process_retry(jobinst, msg, queue, e)
      # We've handled this error associated with this job, don't
      # need to handle it at the global level
      raise Skip
@@ -138,7 +137,7 @@ module Sidekiq
    # Note that +jobinst+ can be nil here if an error is raised before we can
    # instantiate the job instance. All access must be guarded and
    # best effort.
-    def
+    def process_retry(jobinst, msg, queue, exception)
      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
      msg["queue"] = (msg["retry_queue"] || queue)
@@ -169,19 +168,49 @@ module Sidekiq
        msg["error_backtrace"] = compress_backtrace(lines)
      end
 
-
-
-
-
-
-
-
-
-
-
-
-
+      # Goodbye dear message, you (re)tried your best I'm sure.
+      return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
+
+      strategy, delay = delay_for(jobinst, count, exception)
+      case strategy
+      when :discard
+        return # poof!
+      when :kill
+        return retries_exhausted(jobinst, msg, exception)
+      end
+
+      # Logging here can break retries if the logging device raises ENOSPC #3979
+      # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+      jitter = rand(10) * (count + 1)
+      retry_at = Time.now.to_f + delay + jitter
+      payload = Sidekiq.dump_json(msg)
+      redis do |conn|
+        conn.zadd("retry", retry_at.to_s, payload)
+      end
+    end
+
+    # returns (strategy, seconds)
+    def delay_for(jobinst, count, exception)
+      rv = begin
+        # sidekiq_retry_in can return two different things:
+        # 1. When to retry next, as an integer of seconds
+        # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
+        jobinst&.sidekiq_retry_in_block&.call(count, exception)
+      rescue Exception => e
+        handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
+        nil
+      end
+
+      delay = (count**4) + 15
+      if Integer === rv && rv > 0
+        delay = rv
+      elsif rv == :discard
+        return [:discard, nil] # do nothing, job goes poof
+      elsif rv == :kill
+        return [:kill, nil]
      end
+
+      [:default, delay]
    end
 
    def retries_exhausted(jobinst, msg, exception)
@@ -194,7 +223,7 @@ module Sidekiq
 
      send_to_morgue(msg) unless msg["dead"] == false
 
-
+      config.death_handlers.each do |handler|
        handler.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling death handler", job: msg})
@@ -204,7 +233,15 @@ module Sidekiq
    def send_to_morgue(msg)
      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
      payload = Sidekiq.dump_json(msg)
-
+      now = Time.now.to_f
+
+      config.redis do |conn|
+        conn.multi do |xa|
+          xa.zadd("dead", now.to_s, payload)
+          xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
+          xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
+        end
+      end
    end
 
    def retry_attempts_from(msg_retry, default)
@@ -215,22 +252,6 @@ module Sidekiq
      end
    end
 
-    def delay_for(jobinst, count, exception)
-      jitter = rand(10) * (count + 1)
-      if jobinst&.sidekiq_retry_in_block
-        custom_retry_in = retry_in(jobinst, count, exception).to_i
-        return custom_retry_in + jitter if custom_retry_in > 0
-      end
-      (count**4) + 15 + jitter
-    end
-
-    def retry_in(jobinst, count, exception)
-      jobinst.sidekiq_retry_in_block.call(count, exception)
-    rescue Exception => e
-      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
-      nil
-    end
-
    def exception_caused_by_shutdown?(e, checked_causes = [])
      return false unless e.cause
 
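Per the new delay_for above, a job's sidekiq_retry_in block may now return either a number of seconds or a routing symbol (:discard, :kill, :default). A hedged sketch of a job using this; the class, queue and SomeTransientError are hypothetical names invented for illustration:

    require "sidekiq"
    require "net/http"

    SomeTransientError = Class.new(StandardError)   # placeholder error type

    class WebhookDeliveryJob
      include Sidekiq::Job

      sidekiq_options retry: 10

      sidekiq_retry_in do |count, exception|
        case exception
        when Net::HTTPClientException then :kill         # permanent failure, straight to the dead set
        when SomeTransientError       then 10 * (count + 1)  # seconds until the next attempt
        else :default                                     # standard (count**4) + 15 + jitter backoff
        end
      end

      def perform(url, payload)
        # deliver the webhook ...
      end
    end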
data/lib/sidekiq/job_util.rb
CHANGED
@@ -4,7 +4,8 @@ require "time"
 module Sidekiq
   module JobUtil
    # These functions encapsulate various job utilities.
-
+
+    TRANSIENT_ATTRIBUTES = %w[]
 
    def validate(item)
      raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
@@ -16,13 +17,13 @@ module Sidekiq
 
    def verify_json(item)
      job_class = item["wrapped"] || item["class"]
-      if Sidekiq
+      if Sidekiq[:on_complex_arguments] == :raise
        msg = <<~EOM
          Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
          To disable this error, remove `Sidekiq.strict_args!` from your initializer.
        EOM
        raise(ArgumentError, msg) unless json_safe?(item)
-      elsif Sidekiq
+      elsif Sidekiq[:on_complex_arguments] == :warn
        Sidekiq.logger.warn <<~EOM unless json_safe?(item)
          Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
          Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
@@ -42,6 +43,9 @@ module Sidekiq
 
      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
 
+      # remove job attributes which aren't necessary to persist into Redis
+      TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
      item["jid"] ||= SecureRandom.hex(12)
      item["class"] = item["class"].to_s
      item["queue"] = item["queue"].to_s
|