sidekiq 6.4.1 → 7.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of sidekiq has been flagged as potentially problematic.
- checksums.yaml +4 -4
- data/Changes.md +107 -5
- data/README.md +14 -13
- data/bin/sidekiq +3 -8
- data/bin/sidekiqload +26 -29
- data/lib/sidekiq/api.rb +232 -157
- data/lib/sidekiq/capsule.rb +110 -0
- data/lib/sidekiq/cli.rb +80 -86
- data/lib/sidekiq/client.rb +54 -42
- data/lib/sidekiq/component.rb +66 -0
- data/lib/sidekiq/config.rb +271 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +20 -19
- data/lib/sidekiq/job.rb +375 -10
- data/lib/sidekiq/job_logger.rb +1 -1
- data/lib/sidekiq/job_retry.rb +74 -53
- data/lib/sidekiq/job_util.rb +17 -11
- data/lib/sidekiq/launcher.rb +63 -69
- data/lib/sidekiq/logger.rb +6 -45
- data/lib/sidekiq/manager.rb +33 -32
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +134 -0
- data/lib/sidekiq/middleware/chain.rb +84 -42
- data/lib/sidekiq/middleware/current_attributes.rb +18 -17
- data/lib/sidekiq/middleware/i18n.rb +6 -4
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +1 -1
- data/lib/sidekiq/paginator.rb +10 -2
- data/lib/sidekiq/processor.rb +56 -59
- data/lib/sidekiq/rails.rb +10 -9
- data/lib/sidekiq/redis_client_adapter.rb +118 -0
- data/lib/sidekiq/redis_connection.rb +13 -82
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +65 -37
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +41 -68
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +22 -6
- data/lib/sidekiq/web/csrf_protection.rb +3 -3
- data/lib/sidekiq/web/helpers.rb +21 -19
- data/lib/sidekiq/web.rb +3 -14
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +84 -207
- data/sidekiq.gemspec +29 -5
- data/web/assets/javascripts/application.js +58 -26
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +3 -240
- data/web/assets/javascripts/metrics.js +236 -0
- data/web/assets/stylesheets/application-rtl.css +2 -91
- data/web/assets/stylesheets/application.css +64 -297
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +52 -52
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +82 -69
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +67 -67
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +71 -68
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +63 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +37 -11
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +5 -2
- data/web/views/_nav.erb +1 -1
- data/web/views/_summary.erb +1 -1
- data/web/views/busy.erb +9 -4
- data/web/views/dashboard.erb +36 -4
- data/web/views/metrics.erb +80 -0
- data/web/views/metrics_for_job.erb +69 -0
- data/web/views/queue.erb +5 -1
- metadata +69 -22
- data/lib/sidekiq/delay.rb +0 -43
- data/lib/sidekiq/exception_handler.rb +0 -27
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/util.rb +0 -108
- data/lib/sidekiq/worker.rb +0 -362
- /data/{LICENSE → LICENSE.txt} +0 -0
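The removals at the bottom of this list (data/lib/sidekiq/delay.rb, the data/lib/sidekiq/extensions/ files, data/lib/sidekiq/worker.rb) point at two headline 7.0 changes that show up in the diffs below: the dynamic `.delay` / `.delay_for` extensions are gone (the new `Sidekiq::Job::ClassMethods` raises and points you at `perform_async` / `perform_in` / `perform_at`), and `Sidekiq::Worker` gives way to `Sidekiq::Job`, with the new worker_compatibility_alias.rb apparently keeping the old constant as an alias. A hedged migration sketch, with the mailer and job class names invented for illustration:

```ruby
# Sidekiq 6.x style, using the removed delay extensions (no longer available in 7.0):
#   UserMailer.delay_for(300).welcome_email(42)

# Sidekiq 7.x style: an explicit job class scheduled with perform_in.
class WelcomeEmailJob
  include Sidekiq::Job # Sidekiq::Worker should still work via the compatibility alias

  def perform(user_id)
    # deliver the email here, e.g. UserMailer.welcome_email(user_id).deliver_now
  end
end

WelcomeEmailJob.perform_in(300, 42) # run in five minutes
```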
data/lib/sidekiq/job.rb
CHANGED
@@ -1,13 +1,378 @@
-
+# frozen_string_literal: true
+
+require "sidekiq/client"
 
 module Sidekiq
-
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-
+  ##
+  # Include this module in your job class and you can easily create
+  # asynchronous jobs:
+  #
+  #   class HardJob
+  #     include Sidekiq::Job
+  #     sidekiq_options queue: 'critical', retry: 5
+  #
+  #     def perform(*args)
+  #       # do some work
+  #     end
+  #   end
+  #
+  # Then in your Rails app, you can do this:
+  #
+  #   HardJob.perform_async(1, 2, 3)
+  #
+  # Note that perform_async is a class method, perform is an instance method.
+  #
+  # Sidekiq::Job also includes several APIs to provide compatibility with
+  # ActiveJob.
+  #
+  #   class SomeJob
+  #     include Sidekiq::Job
+  #     queue_as :critical
+  #
+  #     def perform(...)
+  #     end
+  #   end
+  #
+  #   SomeJob.set(wait_until: 1.hour).perform_async(123)
+  #
+  # Note that arguments passed to the job must still obey Sidekiq's
+  # best practice for simple, JSON-native data types. Sidekiq will not
+  # implement ActiveJob's more complex argument serialization. For
+  # this reason, we don't implement `perform_later` as our call semantics
+  # are very different.
+  #
+  module Job
+    ##
+    # The Options module is extracted so we can include it in ActiveJob::Base
+    # and allow native AJs to configure Sidekiq features/internals.
+    module Options
+      def self.included(base)
+        base.extend(ClassMethods)
+        base.sidekiq_class_attribute :sidekiq_options_hash
+        base.sidekiq_class_attribute :sidekiq_retry_in_block
+        base.sidekiq_class_attribute :sidekiq_retries_exhausted_block
+      end
+
+      module ClassMethods
+        ACCESSOR_MUTEX = Mutex.new
+
+        ##
+        # Allows customization for this type of Job.
+        # Legal options:
+        #
+        #   queue - name of queue to use for this job type, default *default*
+        #   retry - enable retries for this Job in case of error during execution,
+        #     *true* to use the default or *Integer* count
+        #   backtrace - whether to save any error backtrace in the retry payload to display in web UI,
+        #     can be true, false or an integer number of lines to save, default *false*
+        #
+        # In practice, any option is allowed. This is the main mechanism to configure the
+        # options for a specific job.
+        def sidekiq_options(opts = {})
+          opts = opts.transform_keys(&:to_s) # stringify
+          self.sidekiq_options_hash = get_sidekiq_options.merge(opts)
+        end
+
+        def sidekiq_retry_in(&block)
+          self.sidekiq_retry_in_block = block
+        end
+
+        def sidekiq_retries_exhausted(&block)
+          self.sidekiq_retries_exhausted_block = block
+        end
+
+        def get_sidekiq_options # :nodoc:
+          self.sidekiq_options_hash ||= Sidekiq.default_job_options
+        end
+
+        def sidekiq_class_attribute(*attrs)
+          instance_reader = true
+          instance_writer = true
+
+          attrs.each do |name|
+            synchronized_getter = "__synchronized_#{name}"
+
+            singleton_class.instance_eval do
+              undef_method(name) if method_defined?(name) || private_method_defined?(name)
+            end
+
+            define_singleton_method(synchronized_getter) { nil }
+            singleton_class.class_eval do
+              private(synchronized_getter)
+            end
+
+            define_singleton_method(name) { ACCESSOR_MUTEX.synchronize { send synchronized_getter } }
+
+            ivar = "@#{name}"
+
+            singleton_class.instance_eval do
+              m = "#{name}="
+              undef_method(m) if method_defined?(m) || private_method_defined?(m)
+            end
+            define_singleton_method("#{name}=") do |val|
+              singleton_class.class_eval do
+                ACCESSOR_MUTEX.synchronize do
+                  undef_method(synchronized_getter) if method_defined?(synchronized_getter) || private_method_defined?(synchronized_getter)
+                  define_method(synchronized_getter) { val }
+                end
+              end
+
+              if singleton_class?
+                class_eval do
+                  undef_method(name) if method_defined?(name) || private_method_defined?(name)
+                  define_method(name) do
+                    if instance_variable_defined? ivar
+                      instance_variable_get ivar
+                    else
+                      singleton_class.send name
+                    end
+                  end
+                end
+              end
+              val
+            end
+
+            if instance_reader
+              undef_method(name) if method_defined?(name) || private_method_defined?(name)
+              define_method(name) do
+                if instance_variable_defined?(ivar)
+                  instance_variable_get ivar
+                else
+                  self.class.public_send name
+                end
+              end
+            end
+
+            if instance_writer
+              m = "#{name}="
+              undef_method(m) if method_defined?(m) || private_method_defined?(m)
+              attr_writer name
+            end
+          end
+        end
+      end
+    end
+
+    attr_accessor :jid
+
+    def self.included(base)
+      raise ArgumentError, "Sidekiq::Job cannot be included in an ActiveJob: #{base.name}" if base.ancestors.any? { |c| c.name == "ActiveJob::Base" }
+
+      base.include(Options)
+      base.extend(ClassMethods)
+    end
+
+    def logger
+      Sidekiq.logger
+    end
+
+    # This helper class encapsulates the set options for `set`, e.g.
+    #
+    #     SomeJob.set(queue: 'foo').perform_async(....)
+    #
+    class Setter
+      include Sidekiq::JobUtil
+
+      def initialize(klass, opts)
+        @klass = klass
+        # NB: the internal hash always has stringified keys
+        @opts = opts.transform_keys(&:to_s)
+
+        # ActiveJob compatibility
+        interval = @opts.delete("wait_until") || @opts.delete("wait")
+        at(interval) if interval
+      end
+
+      def set(options)
+        hash = options.transform_keys(&:to_s)
+        interval = hash.delete("wait_until") || @opts.delete("wait")
+        @opts.merge!(hash)
+        at(interval) if interval
+        self
+      end
+
+      def perform_async(*args)
+        if @opts["sync"] == true
+          perform_inline(*args)
+        else
+          @klass.client_push(@opts.merge("args" => args, "class" => @klass))
+        end
+      end
+
+      # Explicit inline execution of a job. Returns nil if the job did not
+      # execute, true otherwise.
+      def perform_inline(*args)
+        raw = @opts.merge("args" => args, "class" => @klass)
+
+        # validate and normalize payload
+        item = normalize_item(raw)
+        queue = item["queue"]
+
+        # run client-side middleware
+        cfg = Sidekiq.default_configuration
+        result = cfg.client_middleware.invoke(item["class"], item, queue, cfg.redis_pool) do
+          item
+        end
+        return nil unless result
+
+        # round-trip the payload via JSON
+        msg = Sidekiq.load_json(Sidekiq.dump_json(item))
+
+        # prepare the job instance
+        klass = Object.const_get(msg["class"])
+        job = klass.new
+        job.jid = msg["jid"]
+        job.bid = msg["bid"] if job.respond_to?(:bid)
+
+        # run the job through server-side middleware
+        result = cfg.server_middleware.invoke(job, msg, msg["queue"]) do
+          # perform it
+          job.perform(*msg["args"])
+          true
+        end
+        return nil unless result
+        # jobs do not return a result. they should store any
+        # modified state.
+        true
+      end
+      alias_method :perform_sync, :perform_inline
+
+      def perform_bulk(args, batch_size: 1_000)
+        client = @klass.build_client
+        result = args.each_slice(batch_size).flat_map do |slice|
+          client.push_bulk(@opts.merge("class" => @klass, "args" => slice))
+        end
+
+        result.is_a?(Enumerator::Lazy) ? result.force : result
+      end
+
+      # +interval+ must be a timestamp, numeric or something that acts
+      # numeric (like an activesupport time interval).
+      def perform_in(interval, *args)
+        at(interval).perform_async(*args)
+      end
+      alias_method :perform_at, :perform_in
+
+      private
+
+      def at(interval)
+        int = interval.to_f
+        now = Time.now.to_f
+        ts = (int < 1_000_000_000 ? now + int : int)
+        # Optimization to enqueue something now that is scheduled to go out now or in the past
+        @opts["at"] = ts if ts > now
+        self
+      end
+    end
+
+    module ClassMethods
+      def delay(*args)
+        raise ArgumentError, "Do not call .delay on a Sidekiq::Job class, call .perform_async"
+      end
+
+      def delay_for(*args)
+        raise ArgumentError, "Do not call .delay_for on a Sidekiq::Job class, call .perform_in"
+      end
+
+      def delay_until(*args)
+        raise ArgumentError, "Do not call .delay_until on a Sidekiq::Job class, call .perform_at"
+      end
+
+      def queue_as(q)
+        sidekiq_options("queue" => q.to_s)
+      end
+
+      def set(options)
+        Setter.new(self, options)
+      end
+
+      def perform_async(*args)
+        Setter.new(self, {}).perform_async(*args)
+      end
+
+      # Inline execution of job's perform method after passing through Sidekiq.client_middleware and Sidekiq.server_middleware
+      def perform_inline(*args)
+        Setter.new(self, {}).perform_inline(*args)
+      end
+      alias_method :perform_sync, :perform_inline
+
+      ##
+      # Push a large number of jobs to Redis, while limiting the batch of
+      # each job payload to 1,000. This method helps cut down on the number
+      # of round trips to Redis, which can increase the performance of enqueueing
+      # large numbers of jobs.
+      #
+      # +items+ must be an Array of Arrays.
+      #
+      # For finer-grained control, use `Sidekiq::Client.push_bulk` directly.
+      #
+      # Example (3 Redis round trips):
+      #
+      #     SomeJob.perform_async(1)
+      #     SomeJob.perform_async(2)
+      #     SomeJob.perform_async(3)
+      #
+      # Would instead become (1 Redis round trip):
+      #
+      #     SomeJob.perform_bulk([[1], [2], [3]])
+      #
+      def perform_bulk(*args, **kwargs)
+        Setter.new(self, {}).perform_bulk(*args, **kwargs)
+      end
+
+      # +interval+ must be a timestamp, numeric or something that acts
+      # numeric (like an activesupport time interval).
+      def perform_in(interval, *args)
+        int = interval.to_f
+        now = Time.now.to_f
+        ts = (int < 1_000_000_000 ? now + int : int)
+
+        item = {"class" => self, "args" => args}
+
+        # Optimization to enqueue something now that is scheduled to go out now or in the past
+        item["at"] = ts if ts > now
+
+        client_push(item)
+      end
+      alias_method :perform_at, :perform_in
+
+      ##
+      # Allows customization for this type of Job.
+      # Legal options:
+      #
+      #   queue - use a named queue for this Job, default 'default'
+      #   retry - enable the RetryJobs middleware for this Job, *true* to use the default
+      #     or *Integer* count
+      #   backtrace - whether to save any error backtrace in the retry payload to display in web UI,
+      #     can be true, false or an integer number of lines to save, default *false*
+      #   pool - use the given Redis connection pool to push this type of job to a given shard.
+      #
+      # In practice, any option is allowed. This is the main mechanism to configure the
+      # options for a specific job.
+      def sidekiq_options(opts = {})
+        super
+      end
+
+      def client_push(item) # :nodoc:
+        raise ArgumentError, "Job payloads should contain no Symbols: #{item}" if item.any? { |k, v| k.is_a?(::Symbol) }
+
+        # allow the user to dynamically re-target jobs to another shard using the "pool" attribute
+        #   FooJob.set(pool: SOME_POOL).perform_async
+        old = Thread.current[:sidekiq_redis_pool]
+        pool = item.delete("pool")
+        Thread.current[:sidekiq_redis_pool] = pool if pool
+        begin
+          build_client.push(item)
+        ensure
+          Thread.current[:sidekiq_redis_pool] = old
+        end
+      end
+
+      def build_client # :nodoc:
+        pool = Thread.current[:sidekiq_redis_pool] || get_sidekiq_options["pool"] || Sidekiq.default_configuration.redis_pool
+        client_class = get_sidekiq_options["client_class"] || Sidekiq::Client
+        client_class.new(pool: pool)
+      end
+    end
+  end
 end
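Taken together, the new Sidekiq::Job module above defines the whole 7.x enqueueing surface: `perform_async`, the `Setter` returned by `set`, `perform_in` / `perform_at`, `perform_bulk`, and inline execution through the middleware chains. A short usage sketch based on the doc comments in this diff (class, queue, and argument names are illustrative only):

```ruby
class HardJob
  include Sidekiq::Job
  sidekiq_options queue: "critical", retry: 5

  def perform(user_id, note)
    # do some work with simple, JSON-native arguments
  end
end

# Enqueue immediately.
HardJob.perform_async(123, "welcome")

# Override options per call via the Setter; "wait" is treated as a delay in seconds.
HardJob.set(queue: "low", wait: 300).perform_async(123, "later")

# Enqueue many jobs, batched up to 1,000 payloads per Redis round trip.
HardJob.perform_bulk([[1, "a"], [2, "b"], [3, "c"]])

# Run inline through the client and server middleware chains (returns true or nil).
HardJob.perform_inline(123, "now")
```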
data/lib/sidekiq/job_logger.rb
CHANGED
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -1,10 +1,8 @@
 # frozen_string_literal: true
 
-require "sidekiq/scheduled"
-require "sidekiq/api"
-
 require "zlib"
 require "base64"
+require "sidekiq/component"
 
 module Sidekiq
   ##
@@ -25,11 +23,11 @@ module Sidekiq
   #
   # A job looks like:
   #
-  #     { 'class' => '
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
   #
   # The 'retry' option also accepts a number (in place of 'true'):
   #
-  #     { 'class' => '
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
   #
   # The job will be retried this number of times before giving up. (If simply
   # 'true', Sidekiq retries 25 times)
@@ -53,11 +51,11 @@ module Sidekiq
   #
   #   Sidekiq.options[:max_retries] = 7
   #
-  # or limit the number of retries for a particular
+  # or limit the number of retries for a particular job and send retries to
   # a low priority queue with:
   #
-  #   class
-  #     include Sidekiq::
+  #   class MyJob
+  #     include Sidekiq::Job
   #     sidekiq_options retry: 10, retry_queue: 'low'
   #   end
   #
@@ -66,17 +64,18 @@ module Sidekiq
 
     class Skip < Handled; end
 
-    include Sidekiq::
+    include Sidekiq::Component
 
     DEFAULT_MAX_RETRY_ATTEMPTS = 25
 
-    def initialize(
-      @
+    def initialize(capsule)
+      @config = @capsule = capsule
+      @max_retries = Sidekiq.default_configuration[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
     end
 
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
-    # require the
+    # require the job to be instantiated.
    def global(jobstr, queue)
      yield
    rescue Handled => ex
@@ -90,9 +89,9 @@ module Sidekiq
 
      msg = Sidekiq.load_json(jobstr)
      if msg["retry"]
-
+        process_retry(nil, msg, queue, e)
      else
-
+        @capsule.config.death_handlers.each do |handler|
          handler.call(msg, e)
        rescue => handler_ex
          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
@@ -103,14 +102,14 @@ module Sidekiq
    end
 
    # The local retry support means that any errors that occur within
-    # this block can be associated with the given
+    # this block can be associated with the given job instance.
    # This is required to support the `sidekiq_retries_exhausted` block.
    #
    # Note that any exception from the block is wrapped in the Skip
    # exception so the global block does not reprocess the error. The
    # Skip exception is unwrapped within Sidekiq::Processor#process before
    # calling the handle_exception handlers.
-    def local(
+    def local(jobinst, jobstr, queue)
      yield
    rescue Handled => ex
      raise ex
@@ -123,11 +122,11 @@ module Sidekiq
 
      msg = Sidekiq.load_json(jobstr)
      if msg["retry"].nil?
-        msg["retry"] =
+        msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
      end
 
      raise e unless msg["retry"]
-
+      process_retry(jobinst, msg, queue, e)
      # We've handled this error associated with this job, don't
      # need to handle it at the global level
      raise Skip
@@ -135,10 +134,10 @@ module Sidekiq
 
    private
 
-    # Note that +
-    # instantiate the
+    # Note that +jobinst+ can be nil here if an error is raised before we can
+    # instantiate the job instance. All access must be guarded and
    # best effort.
-    def
+    def process_retry(jobinst, msg, queue, exception)
      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
      msg["queue"] = (msg["retry_queue"] || queue)
@@ -169,24 +168,54 @@ module Sidekiq
        msg["error_backtrace"] = compress_backtrace(lines)
      end
 
-
-
-
-
-
-
-
-
-
-
-
-
+      # Goodbye dear message, you (re)tried your best I'm sure.
+      return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
+
+      strategy, delay = delay_for(jobinst, count, exception)
+      case strategy
+      when :discard
+        return # poof!
+      when :kill
+        return retries_exhausted(jobinst, msg, exception)
      end
+
+      # Logging here can break retries if the logging device raises ENOSPC #3979
+      # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+      jitter = rand(10) * (count + 1)
+      retry_at = Time.now.to_f + delay + jitter
+      payload = Sidekiq.dump_json(msg)
+      redis do |conn|
+        conn.zadd("retry", retry_at.to_s, payload)
+      end
+    end
+
+    # returns (strategy, seconds)
+    def delay_for(jobinst, count, exception)
+      rv = begin
+        # sidekiq_retry_in can return two different things:
+        # 1. When to retry next, as an integer of seconds
+        # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
+        jobinst&.sidekiq_retry_in_block&.call(count, exception)
+      rescue Exception => e
+        handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
+        nil
+      end
+
+      delay = (count**4) + 15
+      if Integer === rv && rv > 0
+        delay = rv
+      elsif rv == :discard
+        return [:discard, nil] # do nothing, job goes poof
+      elsif rv == :kill
+        return [:kill, nil]
+      end
+
+      [:default, delay]
    end
 
-    def retries_exhausted(
+    def retries_exhausted(jobinst, msg, exception)
      begin
-        block =
+        block = jobinst&.sidekiq_retries_exhausted_block
        block&.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
@@ -194,7 +223,7 @@ module Sidekiq
      end
 
      send_to_morgue(msg) unless msg["dead"] == false
-
+      @capsule.config.death_handlers.each do |handler|
        handler.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling death handler", job: msg})
@@ -204,7 +233,15 @@ module Sidekiq
    def send_to_morgue(msg)
      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
      payload = Sidekiq.dump_json(msg)
-
+      now = Time.now.to_f
+
+      redis do |conn|
+        conn.multi do |xa|
+          xa.zadd("dead", now.to_s, payload)
+          xa.zremrangebyscore("dead", "-inf", now - @capsule.config[:dead_timeout_in_seconds])
+          xa.zremrangebyrank("dead", 0, - @capsule.config[:dead_max_jobs])
+        end
+      end
    end
 
    def retry_attempts_from(msg_retry, default)
@@ -215,22 +252,6 @@ module Sidekiq
      end
    end
 
-    def delay_for(worker, count, exception)
-      jitter = rand(10) * (count + 1)
-      if worker&.sidekiq_retry_in_block
-        custom_retry_in = retry_in(worker, count, exception).to_i
-        return custom_retry_in + jitter if custom_retry_in > 0
-      end
-      (count**4) + 15 + jitter
-    end
-
-    def retry_in(worker, count, exception)
-      worker.sidekiq_retry_in_block.call(count, exception)
-    rescue Exception => e
-      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
-      nil
-    end
-
    def exception_caused_by_shutdown?(e, checked_causes = [])
      return false unless e.cause
 
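The rewritten `delay_for` above changes what a `sidekiq_retry_in` block may return: an integer number of seconds, or a routing symbol (`:discard`, `:kill`, `:default`); anything else falls back to the default `(count**4) + 15` delay, and JobRetry adds its own jitter on top in either case. A hedged sketch of a job class using these hooks (the error classes are placeholders defined only for the example):

```ruby
class RateLimited < StandardError; end
class InvalidInput < StandardError; end

class UnreliableApiJob
  include Sidekiq::Job

  # Return seconds until the next retry, or a symbol:
  #   :discard drops the job silently, :kill sends it straight to the dead set.
  sidekiq_retry_in do |count, exception|
    case exception
    when RateLimited then 60 * (count + 1) # back off harder each attempt
    when InvalidInput then :discard        # retrying will never help
    else :default                          # use the standard exponential backoff
    end
  end

  sidekiq_retries_exhausted do |msg, exception|
    Sidekiq.logger.warn("Giving up on #{msg["class"]} #{msg["jid"]}: #{exception.message}")
  end

  def perform(payload)
    # call the flaky API here
  end
end
```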
data/lib/sidekiq/job_util.rb
CHANGED
@@ -4,7 +4,8 @@ require "time"
 module Sidekiq
   module JobUtil
     # These functions encapsulate various job utilities.
-
+
+    TRANSIENT_ATTRIBUTES = %w[]
 
     def validate(item)
       raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
@@ -12,16 +13,19 @@ module Sidekiq
       raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
       raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
       raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+    end
 
-
+    def verify_json(item)
+      job_class = item["wrapped"] || item["class"]
+      if Sidekiq::Config::DEFAULTS[:on_complex_arguments] == :raise
        msg = <<~EOM
-          Job arguments to #{
-          To disable this error,
+          Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
+          To disable this error, add `Sidekiq.strict_args!(false)` to your initializer.
        EOM
        raise(ArgumentError, msg) unless json_safe?(item)
-      elsif Sidekiq
-
-          Job arguments to #{
+      elsif Sidekiq::Config::DEFAULTS[:on_complex_arguments] == :warn
+        warn <<~EOM unless json_safe?(item)
+          Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
          Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
          by calling `Sidekiq.strict_args!` during Sidekiq initialization.
        EOM
@@ -39,20 +43,22 @@ module Sidekiq
 
      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
 
+      # remove job attributes which aren't necessary to persist into Redis
+      TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
+      item["jid"] ||= SecureRandom.hex(12)
      item["class"] = item["class"].to_s
      item["queue"] = item["queue"].to_s
-      item["jid"] ||= SecureRandom.hex(12)
      item["created_at"] ||= Time.now.to_f
-
      item
    end
 
    def normalized_hash(item_class)
      if item_class.is_a?(Class)
-        raise(ArgumentError, "Message must include a Sidekiq::
+        raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
        item_class.get_sidekiq_options
      else
-        Sidekiq.
+        Sidekiq.default_job_options
      end
    end
 
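The `verify_json` check above is what enforces Sidekiq's "simple, JSON-native arguments" rule; per the warning text in this diff, the default mode in 7.0 raises on anything that does not survive a JSON round trip. A brief sketch of what passes and what is rejected, reusing the hypothetical HardJob from the earlier example:

```ruby
# JSON-native arguments (strings, numbers, booleans, nil, arrays, hashes) are fine:
HardJob.perform_async(123, "reset", {"force" => true})

# Symbols, Time objects, ActiveRecord models and the like do not round-trip through
# JSON and are rejected under the default :raise mode:
#   HardJob.perform_async(:reset, Time.now)   # => ArgumentError

# To keep the old permissive behavior, opt out in an initializer:
Sidekiq.strict_args!(false)
```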