sidekiq 6.5.12 → 7.2.4
- checksums.yaml +4 -4
- data/Changes.md +224 -20
- data/README.md +43 -35
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +3 -8
- data/bin/sidekiqload +204 -118
- data/bin/sidekiqmon +3 -0
- data/lib/sidekiq/api.rb +187 -135
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +59 -75
- data/lib/sidekiq/client.rb +66 -37
- data/lib/sidekiq/component.rb +4 -1
- data/lib/sidekiq/config.rb +287 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +11 -14
- data/lib/sidekiq/job.rb +371 -10
- data/lib/sidekiq/job_logger.rb +2 -2
- data/lib/sidekiq/job_retry.rb +36 -18
- data/lib/sidekiq/job_util.rb +51 -15
- data/lib/sidekiq/launcher.rb +71 -65
- data/lib/sidekiq/logger.rb +2 -27
- data/lib/sidekiq/manager.rb +9 -11
- data/lib/sidekiq/metrics/query.rb +7 -4
- data/lib/sidekiq/metrics/shared.rb +8 -7
- data/lib/sidekiq/metrics/tracking.rb +27 -21
- data/lib/sidekiq/middleware/chain.rb +19 -18
- data/lib/sidekiq/middleware/current_attributes.rb +52 -20
- data/lib/sidekiq/monitor.rb +16 -3
- data/lib/sidekiq/paginator.rb +2 -2
- data/lib/sidekiq/processor.rb +46 -51
- data/lib/sidekiq/rails.rb +15 -10
- data/lib/sidekiq/redis_client_adapter.rb +23 -66
- data/lib/sidekiq/redis_connection.rb +15 -117
- data/lib/sidekiq/scheduled.rb +22 -23
- data/lib/sidekiq/testing.rb +32 -41
- data/lib/sidekiq/transaction_aware_client.rb +11 -5
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +8 -3
- data/lib/sidekiq/web/application.rb +108 -15
- data/lib/sidekiq/web/csrf_protection.rb +10 -7
- data/lib/sidekiq/web/helpers.rb +52 -38
- data/lib/sidekiq/web.rb +17 -16
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +76 -274
- data/sidekiq.gemspec +12 -10
- data/web/assets/javascripts/application.js +39 -0
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/dashboard-charts.js +182 -0
- data/web/assets/javascripts/dashboard.js +10 -232
- data/web/assets/javascripts/metrics.js +151 -115
- data/web/assets/stylesheets/application-dark.css +4 -0
- data/web/assets/stylesheets/application-rtl.css +10 -89
- data/web/assets/stylesheets/application.css +45 -298
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +2 -7
- data/web/locales/en.yml +78 -70
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +81 -67
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +67 -69
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +79 -69
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +20 -18
- data/web/locales/zh-tw.yml +10 -1
- data/web/views/_footer.erb +17 -2
- data/web/views/_job_info.erb +18 -2
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +1 -1
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +46 -35
- data/web/views/dashboard.erb +26 -5
- data/web/views/filtering.erb +7 -0
- data/web/views/metrics.erb +46 -24
- data/web/views/metrics_for_job.erb +41 -69
- data/web/views/morgue.erb +5 -9
- data/web/views/queue.erb +10 -14
- data/web/views/queues.erb +9 -3
- data/web/views/retries.erb +5 -9
- data/web/views/scheduled.erb +12 -13
- metadata +44 -38
- data/lib/sidekiq/delay.rb +0 -43
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/metrics/deploy.rb +0 -47
- data/lib/sidekiq/worker.rb +0 -370
- data/web/assets/javascripts/graph.js +0 -16
- /data/{LICENSE → LICENSE.txt} +0 -0
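The file list above shows the core of the 6.x → 7.x rework: data/lib/sidekiq/capsule.rb, config.rb and embedded.rb are new, while the old delayed extensions (delay.rb, extensions/*) and worker.rb are gone. A hedged configuration sketch of the capsule API those new files introduce — the capsule name, queue and concurrency values below are illustrative, not taken from this diff:

```ruby
require "sidekiq"

Sidekiq.configure_server do |config|
  # 7.x: server-wide settings live on a Sidekiq::Config object
  config[:max_retries] = 7

  # A capsule is an extra, independently sized group of processor threads.
  config.capsule("serial") do |cap|
    cap.concurrency = 1
    cap.queues = %w[serial]
  end
end
```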
data/lib/sidekiq/job.rb
CHANGED
@@ -1,13 +1,374 @@
-
+# frozen_string_literal: true
+
+require "sidekiq/client"
 
 module Sidekiq
-
-  #
-  #
-  #
-  #
-  #
-  #
-  #
-
+  ##
+  # Include this module in your job class and you can easily create
+  # asynchronous jobs:
+  #
+  #   class HardJob
+  #     include Sidekiq::Job
+  #     sidekiq_options queue: 'critical', retry: 5
+  #
+  #     def perform(*args)
+  #       # do some work
+  #     end
+  #   end
+  #
+  # Then in your Rails app, you can do this:
+  #
+  #   HardJob.perform_async(1, 2, 3)
+  #
+  # Note that perform_async is a class method, perform is an instance method.
+  #
+  # Sidekiq::Job also includes several APIs to provide compatibility with
+  # ActiveJob.
+  #
+  #   class SomeJob
+  #     include Sidekiq::Job
+  #     queue_as :critical
+  #
+  #     def perform(...)
+  #     end
+  #   end
+  #
+  #   SomeJob.set(wait_until: 1.hour).perform_async(123)
+  #
+  # Note that arguments passed to the job must still obey Sidekiq's
+  # best practice for simple, JSON-native data types. Sidekiq will not
+  # implement ActiveJob's more complex argument serialization. For
+  # this reason, we don't implement `perform_later` as our call semantics
+  # are very different.
+  #
+  module Job
+    ##
+    # The Options module is extracted so we can include it in ActiveJob::Base
+    # and allow native AJs to configure Sidekiq features/internals.
+    module Options
+      def self.included(base)
+        base.extend(ClassMethods)
+        base.sidekiq_class_attribute :sidekiq_options_hash
+        base.sidekiq_class_attribute :sidekiq_retry_in_block
+        base.sidekiq_class_attribute :sidekiq_retries_exhausted_block
+      end
+
+      module ClassMethods
+        ACCESSOR_MUTEX = Mutex.new
+
+        ##
+        # Allows customization for this type of Job.
+        # Legal options:
+        #
+        #   queue - name of queue to use for this job type, default *default*
+        #   retry - enable retries for this Job in case of error during execution,
+        #      *true* to use the default or *Integer* count
+        #   backtrace - whether to save any error backtrace in the retry payload to display in web UI,
+        #      can be true, false or an integer number of lines to save, default *false*
+        #
+        # In practice, any option is allowed. This is the main mechanism to configure the
+        # options for a specific job.
+        def sidekiq_options(opts = {})
+          opts = opts.transform_keys(&:to_s) # stringify
+          self.sidekiq_options_hash = get_sidekiq_options.merge(opts)
+        end
+
+        def sidekiq_retry_in(&block)
+          self.sidekiq_retry_in_block = block
+        end
+
+        def sidekiq_retries_exhausted(&block)
+          self.sidekiq_retries_exhausted_block = block
+        end
+
+        def get_sidekiq_options # :nodoc:
+          self.sidekiq_options_hash ||= Sidekiq.default_job_options
+        end
+
+        def sidekiq_class_attribute(*attrs)
+          instance_reader = true
+          instance_writer = true
+
+          attrs.each do |name|
+            synchronized_getter = "__synchronized_#{name}"
+
+            singleton_class.instance_eval do
+              undef_method(name) if method_defined?(name) || private_method_defined?(name)
+            end
+
+            define_singleton_method(synchronized_getter) { nil }
+            singleton_class.class_eval do
+              private(synchronized_getter)
+            end
+
+            define_singleton_method(name) { ACCESSOR_MUTEX.synchronize { send synchronized_getter } }
+
+            ivar = "@#{name}"
+
+            singleton_class.instance_eval do
+              m = "#{name}="
+              undef_method(m) if method_defined?(m) || private_method_defined?(m)
+            end
+            define_singleton_method(:"#{name}=") do |val|
+              singleton_class.class_eval do
+                ACCESSOR_MUTEX.synchronize do
+                  undef_method(synchronized_getter) if method_defined?(synchronized_getter) || private_method_defined?(synchronized_getter)
+                  define_method(synchronized_getter) { val }
+                end
+              end
+
+              if singleton_class?
+                class_eval do
+                  undef_method(name) if method_defined?(name) || private_method_defined?(name)
+                  define_method(name) do
+                    if instance_variable_defined? ivar
+                      instance_variable_get ivar
+                    else
+                      singleton_class.send name
+                    end
+                  end
+                end
+              end
+              val
+            end
+
+            if instance_reader
+              undef_method(name) if method_defined?(name) || private_method_defined?(name)
+              define_method(name) do
+                if instance_variable_defined?(ivar)
+                  instance_variable_get ivar
+                else
+                  self.class.public_send name
+                end
+              end
+            end
+
+            if instance_writer
+              m = "#{name}="
+              undef_method(m) if method_defined?(m) || private_method_defined?(m)
+              attr_writer name
+            end
+          end
+        end
+      end
+    end
+
+    attr_accessor :jid
+
+    def self.included(base)
+      raise ArgumentError, "Sidekiq::Job cannot be included in an ActiveJob: #{base.name}" if base.ancestors.any? { |c| c.name == "ActiveJob::Base" }
+
+      base.include(Options)
+      base.extend(ClassMethods)
+    end
+
+    def logger
+      Sidekiq.logger
+    end
+
+    # This helper class encapsulates the set options for `set`, e.g.
+    #
+    #     SomeJob.set(queue: 'foo').perform_async(....)
+    #
+    class Setter
+      include Sidekiq::JobUtil
+
+      def initialize(klass, opts)
+        @klass = klass
+        # NB: the internal hash always has stringified keys
+        @opts = opts.transform_keys(&:to_s)
+
+        # ActiveJob compatibility
+        interval = @opts.delete("wait_until") || @opts.delete("wait")
+        at(interval) if interval
+      end
+
+      def set(options)
+        hash = options.transform_keys(&:to_s)
+        interval = hash.delete("wait_until") || @opts.delete("wait")
+        @opts.merge!(hash)
+        at(interval) if interval
+        self
+      end
+
+      def perform_async(*args)
+        if @opts["sync"] == true
+          perform_inline(*args)
+        else
+          @klass.client_push(@opts.merge("args" => args, "class" => @klass))
+        end
+      end
+
+      # Explicit inline execution of a job. Returns nil if the job did not
+      # execute, true otherwise.
+      def perform_inline(*args)
+        raw = @opts.merge("args" => args, "class" => @klass)
+
+        # validate and normalize payload
+        item = normalize_item(raw)
+        queue = item["queue"]
+
+        # run client-side middleware
+        cfg = Sidekiq.default_configuration
+        result = cfg.client_middleware.invoke(item["class"], item, queue, cfg.redis_pool) do
+          item
+        end
+        return nil unless result
+
+        # round-trip the payload via JSON
+        msg = Sidekiq.load_json(Sidekiq.dump_json(item))
+
+        # prepare the job instance
+        klass = Object.const_get(msg["class"])
+        job = klass.new
+        job.jid = msg["jid"]
+        job.bid = msg["bid"] if job.respond_to?(:bid)
+
+        # run the job through server-side middleware
+        result = cfg.server_middleware.invoke(job, msg, msg["queue"]) do
+          # perform it
+          job.perform(*msg["args"])
+          true
+        end
+        return nil unless result
+        # jobs do not return a result. they should store any
+        # modified state.
+        true
+      end
+      alias_method :perform_sync, :perform_inline
+
+      def perform_bulk(args, batch_size: 1_000)
+        client = @klass.build_client
+        client.push_bulk(@opts.merge("class" => @klass, "args" => args, :batch_size => batch_size))
+      end
+
+      # +interval+ must be a timestamp, numeric or something that acts
+      # numeric (like an activesupport time interval).
+      def perform_in(interval, *args)
+        at(interval).perform_async(*args)
+      end
+      alias_method :perform_at, :perform_in
+
+      private
+
+      def at(interval)
+        int = interval.to_f
+        now = Time.now.to_f
+        ts = ((int < 1_000_000_000) ? now + int : int)
+        # Optimization to enqueue something now that is scheduled to go out now or in the past
+        @opts["at"] = ts if ts > now
+        self
+      end
+    end
+
+    module ClassMethods
+      def delay(*args)
+        raise ArgumentError, "Do not call .delay on a Sidekiq::Job class, call .perform_async"
+      end
+
+      def delay_for(*args)
+        raise ArgumentError, "Do not call .delay_for on a Sidekiq::Job class, call .perform_in"
+      end
+
+      def delay_until(*args)
+        raise ArgumentError, "Do not call .delay_until on a Sidekiq::Job class, call .perform_at"
+      end
+
+      def queue_as(q)
+        sidekiq_options("queue" => q.to_s)
+      end
+
+      def set(options)
+        Setter.new(self, options)
+      end
+
+      def perform_async(*args)
+        Setter.new(self, {}).perform_async(*args)
+      end
+
+      # Inline execution of job's perform method after passing through Sidekiq.client_middleware and Sidekiq.server_middleware
+      def perform_inline(*args)
+        Setter.new(self, {}).perform_inline(*args)
+      end
+      alias_method :perform_sync, :perform_inline
+
+      ##
+      # Push a large number of jobs to Redis, while limiting the batch of
+      # each job payload to 1,000. This method helps cut down on the number
+      # of round trips to Redis, which can increase the performance of enqueueing
+      # large numbers of jobs.
+      #
+      # +items+ must be an Array of Arrays.
+      #
+      # For finer-grained control, use `Sidekiq::Client.push_bulk` directly.
+      #
+      # Example (3 Redis round trips):
+      #
+      #     SomeJob.perform_async(1)
+      #     SomeJob.perform_async(2)
+      #     SomeJob.perform_async(3)
+      #
+      # Would instead become (1 Redis round trip):
+      #
+      #     SomeJob.perform_bulk([[1], [2], [3]])
+      #
+      def perform_bulk(*args, **kwargs)
+        Setter.new(self, {}).perform_bulk(*args, **kwargs)
+      end
+
+      # +interval+ must be a timestamp, numeric or something that acts
+      # numeric (like an activesupport time interval).
+      def perform_in(interval, *args)
+        int = interval.to_f
+        now = Time.now.to_f
+        ts = ((int < 1_000_000_000) ? now + int : int)
+
+        item = {"class" => self, "args" => args}
+
+        # Optimization to enqueue something now that is scheduled to go out now or in the past
+        item["at"] = ts if ts > now
+
+        client_push(item)
+      end
+      alias_method :perform_at, :perform_in
+
+      ##
+      # Allows customization for this type of Job.
+      # Legal options:
+      #
+      #   queue - use a named queue for this Job, default 'default'
+      #   retry - enable the RetryJobs middleware for this Job, *true* to use the default
+      #      or *Integer* count
+      #   backtrace - whether to save any error backtrace in the retry payload to display in web UI,
+      #      can be true, false or an integer number of lines to save, default *false*
+      #   pool - use the given Redis connection pool to push this type of job to a given shard.
+      #
+      # In practice, any option is allowed. This is the main mechanism to configure the
+      # options for a specific job.
+      def sidekiq_options(opts = {})
+        super
+      end
+
+      def client_push(item) # :nodoc:
+        raise ArgumentError, "Job payloads should contain no Symbols: #{item}" if item.any? { |k, v| k.is_a?(::Symbol) }
+
+        # allow the user to dynamically re-target jobs to another shard using the "pool" attribute
+        #   FooJob.set(pool: SOME_POOL).perform_async
+        old = Thread.current[:sidekiq_redis_pool]
+        pool = item.delete("pool")
+        Thread.current[:sidekiq_redis_pool] = pool if pool
+        begin
+          build_client.push(item)
+        ensure
+          Thread.current[:sidekiq_redis_pool] = old
+        end
+      end
+
+      def build_client # :nodoc:
+        pool = Thread.current[:sidekiq_redis_pool] || get_sidekiq_options["pool"] || Sidekiq.default_configuration.redis_pool
+        client_class = get_sidekiq_options["client_class"] || Sidekiq::Client
+        client_class.new(pool: pool)
+      end
+    end
+  end
 end
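The rewritten sidekiq/job.rb above absorbs the old Sidekiq::Worker API and adds two notable client-side features: `perform_bulk` on the job class and per-call shard re-targeting through the `pool` option handled in `client_push`/`build_client`. A minimal usage sketch; `HardJob` mirrors the class from the file's own comment, and `OTHER_POOL` simply reuses the default pool to stand in for a real second shard:

```ruby
require "sidekiq"

class HardJob
  include Sidekiq::Job
  sidekiq_options queue: "critical", retry: 5

  def perform(*args)
    # do some work
  end
end

HardJob.perform_async(1, 2, 3)          # one push
HardJob.perform_bulk([[4], [5], [6]])   # all three jobs in one Redis round trip

# Re-target a push to another pool/shard (see Setter#perform_async and
# ClassMethods#client_push above). A real app would build a ConnectionPool
# for the second Redis; here the default pool stands in.
OTHER_POOL = Sidekiq.default_configuration.redis_pool
HardJob.set(pool: OTHER_POOL, queue: "low").perform_in(300, 7)
```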
data/lib/sidekiq/job_logger.rb
CHANGED
@@ -2,7 +2,7 @@
 
 module Sidekiq
   class JobLogger
-    def initialize(logger
+    def initialize(logger)
       @logger = logger
     end
 
@@ -33,7 +33,7 @@ module Sidekiq
 
       Thread.current[:sidekiq_context] = h
       level = job_hash["log_level"]
-      if level
+      if level && @logger.respond_to?(:log_at)
         @logger.log_at(level, &block)
       else
         yield
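The `respond_to?(:log_at)` guard added above means a per-job `log_level` is only honored when the configured logger implements `log_at` (as Sidekiq::Logger does); a plain stdlib Logger no longer raises NoMethodError. A hedged sketch of the scenario the guard covers:

```ruby
require "sidekiq"
require "logger"

Sidekiq.configure_server do |config|
  # ::Logger has no #log_at, so jobs that carry a "log_level" option will
  # simply log at this logger's own level instead of failing.
  config.logger = Logger.new($stdout, level: Logger::INFO)
end
```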
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -1,7 +1,6 @@
 # frozen_string_literal: true
 
 require "zlib"
-require "base64"
 require "sidekiq/component"
 
 module Sidekiq
@@ -49,7 +48,7 @@ module Sidekiq
   # The default number of retries is 25 which works out to about 3 weeks
   # You can change the default maximum number of retries in your initializer:
   #
-  #   Sidekiq.
+  #   Sidekiq.default_configuration[:max_retries] = 7
   #
   # or limit the number of retries for a particular job and send retries to
   # a low priority queue with:
@@ -68,9 +67,10 @@ module Sidekiq
 
     DEFAULT_MAX_RETRY_ATTEMPTS = 25
 
-    def initialize(
-      @config =
-      @max_retries =
+    def initialize(capsule)
+      @config = @capsule = capsule
+      @max_retries = Sidekiq.default_configuration[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
+      @backtrace_cleaner = Sidekiq.default_configuration[:backtrace_cleaner]
     end
 
     # The global retry handler requires only the barest of data.
@@ -91,7 +91,7 @@ module Sidekiq
       if msg["retry"]
         process_retry(nil, msg, queue, e)
       else
-
+        @capsule.config.death_handlers.each do |handler|
           handler.call(msg, e)
         rescue => handler_ex
           handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
@@ -159,19 +159,22 @@ module Sidekiq
       end
 
       if msg["backtrace"]
+        backtrace = @backtrace_cleaner.call(exception.backtrace)
         lines = if msg["backtrace"] == true
-
+          backtrace
         else
-
+          backtrace[0...msg["backtrace"].to_i]
         end
 
         msg["error_backtrace"] = compress_backtrace(lines)
       end
 
-      # Goodbye dear message, you (re)tried your best I'm sure.
       return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
 
-
+      rf = msg["retry_for"]
+      return retries_exhausted(jobinst, msg, exception) if rf && ((msg["failed_at"] + rf) < Time.now.to_f)
+
+      strategy, delay = delay_for(jobinst, count, exception, msg)
       case strategy
       when :discard
         return # poof!
@@ -190,17 +193,25 @@ module Sidekiq
     end
 
     # returns (strategy, seconds)
-    def delay_for(jobinst, count, exception)
+    def delay_for(jobinst, count, exception, msg)
       rv = begin
         # sidekiq_retry_in can return two different things:
         # 1. When to retry next, as an integer of seconds
         # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
-        jobinst&.sidekiq_retry_in_block
+        block = jobinst&.sidekiq_retry_in_block
+
+        # the sidekiq_retry_in_block can be defined in a wrapped class (ActiveJob for instance)
+        unless msg["wrapped"].nil?
+          wrapped = Object.const_get(msg["wrapped"])
+          block = wrapped.respond_to?(:sidekiq_retry_in_block) ? wrapped.sidekiq_retry_in_block : nil
+        end
+        block&.call(count, exception, msg)
       rescue Exception => e
        handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
        nil
      end
 
+      rv = rv.to_i if rv.respond_to?(:to_i)
      delay = (count**4) + 15
      if Integer === rv && rv > 0
        delay = rv
@@ -214,16 +225,23 @@ module Sidekiq
     end
 
     def retries_exhausted(jobinst, msg, exception)
-      begin
+      rv = begin
         block = jobinst&.sidekiq_retries_exhausted_block
+
+        # the sidekiq_retries_exhausted_block can be defined in a wrapped class (ActiveJob for instance)
+        unless msg["wrapped"].nil?
+          wrapped = Object.const_get(msg["wrapped"])
+          block = wrapped.respond_to?(:sidekiq_retries_exhausted_block) ? wrapped.sidekiq_retries_exhausted_block : nil
+        end
        block&.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
      end
 
+      return if rv == :discard # poof!
      send_to_morgue(msg) unless msg["dead"] == false
 
-      config.death_handlers.each do |handler|
+      @capsule.config.death_handlers.each do |handler|
        handler.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling death handler", job: msg})
@@ -235,11 +253,11 @@ module Sidekiq
       payload = Sidekiq.dump_json(msg)
       now = Time.now.to_f
 
-
+      redis do |conn|
        conn.multi do |xa|
          xa.zadd("dead", now.to_s, payload)
-          xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
-          xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
+          xa.zremrangebyscore("dead", "-inf", now - @capsule.config[:dead_timeout_in_seconds])
+          xa.zremrangebyrank("dead", 0, - @capsule.config[:dead_max_jobs])
        end
      end
    end
@@ -276,7 +294,7 @@ module Sidekiq
     def compress_backtrace(backtrace)
       serialized = Sidekiq.dump_json(backtrace)
       compressed = Zlib::Deflate.deflate(serialized)
-
+      [compressed].pack("m0") # Base64.strict_encode64
     end
   end
 end
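The retry changes above pass the job hash to `sidekiq_retry_in` as a third argument, look the hooks up on the ActiveJob-wrapped class when present, enforce an optional `retry_for` deadline, and let `sidekiq_retries_exhausted` return `:discard` to skip the dead set entirely. A sketch of the job-side declarations these hooks enable — the class name, error class and intervals are illustrative:

```ruby
require "sidekiq"

RateLimited = Class.new(StandardError)

class FlakyApiJob
  include Sidekiq::Job
  # Give up after 10 attempts or once the job is 48 hours past its first
  # failure, whichever comes first (retry_for is normalized to integer seconds).
  sidekiq_options retry: 10, retry_for: 48 * 3600

  # 7.x passes the job hash as the third block argument.
  sidekiq_retry_in do |count, exception, jobhash|
    case exception
    when RateLimited then 300      # fixed five-minute backoff
    else (count**4) + 15           # roughly the default curve
    end
  end

  sidekiq_retries_exhausted do |jobhash, exception|
    Sidekiq.logger.warn("Giving up on #{jobhash["class"]} #{jobhash["jid"]}")
    :discard                       # skip the dead set and death handlers
  end

  def perform(url)
    # call the flaky API here
  end
end
```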
data/lib/sidekiq/job_util.rb
CHANGED
@@ -9,26 +9,32 @@ module Sidekiq
 
     def validate(item)
       raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
-      raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+      raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array) || item["args"].is_a?(Enumerator::Lazy)
       raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
       raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
       raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+      raise(ArgumentError, "retry_for must be a relative amount of time, e.g. 48.hours `#{item}`") if item["retry_for"] && item["retry_for"] > 1_000_000_000
     end
 
     def verify_json(item)
       job_class = item["wrapped"] || item["class"]
-
-
-
-
-
-
-
-
-
-
-
-
+      args = item["args"]
+      mode = Sidekiq::Config::DEFAULTS[:on_complex_arguments]
+
+      if mode == :raise || mode == :warn
+        if (unsafe_item = json_unsafe?(args))
+          msg = <<~EOM
+            Job arguments to #{job_class} must be native JSON types, but #{unsafe_item.inspect} is a #{unsafe_item.class}.
+            See https://github.com/sidekiq/sidekiq/wiki/Best-Practices
+            To disable this error, add `Sidekiq.strict_args!(false)` to your initializer.
+          EOM
+
+          if mode == :raise
+            raise(ArgumentError, msg)
+          else
+            warn(msg)
+          end
+        end
       end
     end
 
@@ -49,6 +55,7 @@ module Sidekiq
       item["jid"] ||= SecureRandom.hex(12)
       item["class"] = item["class"].to_s
       item["queue"] = item["queue"].to_s
+      item["retry_for"] = item["retry_for"].to_i if item["retry_for"]
       item["created_at"] ||= Time.now.to_f
       item
     end
@@ -64,8 +71,37 @@ module Sidekiq
 
     private
 
-
-
+    RECURSIVE_JSON_UNSAFE = {
+      Integer => ->(val) {},
+      Float => ->(val) {},
+      TrueClass => ->(val) {},
+      FalseClass => ->(val) {},
+      NilClass => ->(val) {},
+      String => ->(val) {},
+      Array => ->(val) {
+        val.each do |e|
+          unsafe_item = RECURSIVE_JSON_UNSAFE[e.class].call(e)
+          return unsafe_item unless unsafe_item.nil?
+        end
+        nil
+      },
+      Hash => ->(val) {
+        val.each do |k, v|
+          return k unless String === k
+
+          unsafe_item = RECURSIVE_JSON_UNSAFE[v.class].call(v)
+          return unsafe_item unless unsafe_item.nil?
+        end
+        nil
+      }
+    }
+
+    RECURSIVE_JSON_UNSAFE.default = ->(val) { val }
+    RECURSIVE_JSON_UNSAFE.compare_by_identity
+    private_constant :RECURSIVE_JSON_UNSAFE
+
+    def json_unsafe?(item)
+      RECURSIVE_JSON_UNSAFE[item.class].call(item)
     end
   end
 end
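The job_util.rb changes above back 7.x strict argument checking (`on_complex_arguments` routed through `json_unsafe?`) and allow `args` to be an `Enumerator::Lazy`, which `perform_bulk` can exploit for very large batches. A hedged sketch reusing the `HardJob` class from the earlier example:

```ruby
# Force strict mode for the demo, then push a non-JSON-native argument.
Sidekiq.strict_args!
begin
  HardJob.perform_async(status: :pending)   # Symbols are not JSON-native
rescue ArgumentError => e
  puts e.message   # the message itself points at Sidekiq.strict_args!(false)
end

# Lazy args: enqueue a large batch without materializing the whole Array.
HardJob.perform_bulk((1..50_000).lazy.map { |i| [i] }, batch_size: 1_000)
```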