sidekiq 6.0.4 → 7.2.0

Potentially problematic release.
Files changed (147)
  1. checksums.yaml +4 -4
  2. data/Changes.md +491 -10
  3. data/LICENSE.txt +9 -0
  4. data/README.md +47 -38
  5. data/bin/sidekiq +22 -3
  6. data/bin/sidekiqload +207 -117
  7. data/bin/sidekiqmon +4 -1
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +418 -233
  13. data/lib/sidekiq/capsule.rb +127 -0
  14. data/lib/sidekiq/cli.rb +122 -86
  15. data/lib/sidekiq/client.rb +109 -97
  16. data/lib/sidekiq/{util.rb → component.rb} +14 -13
  17. data/lib/sidekiq/config.rb +287 -0
  18. data/lib/sidekiq/deploy.rb +62 -0
  19. data/lib/sidekiq/embedded.rb +61 -0
  20. data/lib/sidekiq/fetch.rb +43 -35
  21. data/lib/sidekiq/{worker.rb → job.rb} +161 -34
  22. data/lib/sidekiq/job_logger.rb +18 -30
  23. data/lib/sidekiq/job_retry.rb +102 -63
  24. data/lib/sidekiq/job_util.rb +107 -0
  25. data/lib/sidekiq/launcher.rb +180 -88
  26. data/lib/sidekiq/logger.rb +13 -47
  27. data/lib/sidekiq/manager.rb +40 -41
  28. data/lib/sidekiq/metrics/query.rb +155 -0
  29. data/lib/sidekiq/metrics/shared.rb +95 -0
  30. data/lib/sidekiq/metrics/tracking.rb +136 -0
  31. data/lib/sidekiq/middleware/chain.rb +99 -52
  32. data/lib/sidekiq/middleware/current_attributes.rb +95 -0
  33. data/lib/sidekiq/middleware/i18n.rb +6 -4
  34. data/lib/sidekiq/middleware/modules.rb +21 -0
  35. data/lib/sidekiq/monitor.rb +18 -5
  36. data/lib/sidekiq/paginator.rb +17 -9
  37. data/lib/sidekiq/processor.rb +81 -80
  38. data/lib/sidekiq/rails.rb +37 -21
  39. data/lib/sidekiq/redis_client_adapter.rb +111 -0
  40. data/lib/sidekiq/redis_connection.rb +22 -87
  41. data/lib/sidekiq/ring_buffer.rb +29 -0
  42. data/lib/sidekiq/scheduled.rb +102 -39
  43. data/lib/sidekiq/sd_notify.rb +149 -0
  44. data/lib/sidekiq/systemd.rb +24 -0
  45. data/lib/sidekiq/testing/inline.rb +4 -4
  46. data/lib/sidekiq/testing.rb +68 -78
  47. data/lib/sidekiq/transaction_aware_client.rb +44 -0
  48. data/lib/sidekiq/version.rb +2 -1
  49. data/lib/sidekiq/web/action.rb +3 -3
  50. data/lib/sidekiq/web/application.rb +132 -28
  51. data/lib/sidekiq/web/csrf_protection.rb +180 -0
  52. data/lib/sidekiq/web/helpers.rb +93 -65
  53. data/lib/sidekiq/web/router.rb +6 -5
  54. data/lib/sidekiq/web.rb +43 -74
  55. data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
  56. data/lib/sidekiq.rb +86 -199
  57. data/sidekiq.gemspec +17 -8
  58. data/web/assets/images/apple-touch-icon.png +0 -0
  59. data/web/assets/javascripts/application.js +146 -61
  60. data/web/assets/javascripts/base-charts.js +106 -0
  61. data/web/assets/javascripts/chart.min.js +13 -0
  62. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  63. data/web/assets/javascripts/dashboard-charts.js +182 -0
  64. data/web/assets/javascripts/dashboard.js +35 -274
  65. data/web/assets/javascripts/metrics.js +298 -0
  66. data/web/assets/stylesheets/application-dark.css +146 -124
  67. data/web/assets/stylesheets/application-rtl.css +2 -95
  68. data/web/assets/stylesheets/application.css +109 -529
  69. data/web/locales/ar.yml +71 -65
  70. data/web/locales/cs.yml +62 -62
  71. data/web/locales/da.yml +60 -53
  72. data/web/locales/de.yml +65 -65
  73. data/web/locales/el.yml +43 -24
  74. data/web/locales/en.yml +85 -67
  75. data/web/locales/es.yml +70 -54
  76. data/web/locales/fa.yml +65 -65
  77. data/web/locales/fr.yml +83 -62
  78. data/web/locales/gd.yml +99 -0
  79. data/web/locales/he.yml +65 -64
  80. data/web/locales/hi.yml +59 -59
  81. data/web/locales/it.yml +53 -53
  82. data/web/locales/ja.yml +75 -65
  83. data/web/locales/ko.yml +52 -52
  84. data/web/locales/lt.yml +83 -0
  85. data/web/locales/nb.yml +61 -61
  86. data/web/locales/nl.yml +52 -52
  87. data/web/locales/pl.yml +45 -45
  88. data/web/locales/pt-br.yml +83 -55
  89. data/web/locales/pt.yml +51 -51
  90. data/web/locales/ru.yml +68 -63
  91. data/web/locales/sv.yml +53 -53
  92. data/web/locales/ta.yml +60 -60
  93. data/web/locales/uk.yml +62 -61
  94. data/web/locales/ur.yml +64 -64
  95. data/web/locales/vi.yml +83 -0
  96. data/web/locales/zh-cn.yml +43 -16
  97. data/web/locales/zh-tw.yml +42 -8
  98. data/web/views/_footer.erb +6 -3
  99. data/web/views/_job_info.erb +19 -3
  100. data/web/views/_metrics_period_select.erb +12 -0
  101. data/web/views/_nav.erb +1 -1
  102. data/web/views/_paging.erb +2 -0
  103. data/web/views/_poll_link.erb +3 -6
  104. data/web/views/_summary.erb +7 -7
  105. data/web/views/busy.erb +73 -26
  106. data/web/views/dashboard.erb +48 -18
  107. data/web/views/dead.erb +1 -1
  108. data/web/views/filtering.erb +7 -0
  109. data/web/views/layout.erb +3 -2
  110. data/web/views/metrics.erb +91 -0
  111. data/web/views/metrics_for_job.erb +59 -0
  112. data/web/views/morgue.erb +11 -15
  113. data/web/views/queue.erb +25 -25
  114. data/web/views/queues.erb +13 -7
  115. data/web/views/retries.erb +12 -16
  116. data/web/views/retry.erb +1 -1
  117. data/web/views/scheduled.erb +13 -14
  118. metadata +65 -56
  119. data/.circleci/config.yml +0 -82
  120. data/.github/contributing.md +0 -32
  121. data/.github/issue_template.md +0 -11
  122. data/.gitignore +0 -13
  123. data/.standard.yml +0 -20
  124. data/3.0-Upgrade.md +0 -70
  125. data/4.0-Upgrade.md +0 -53
  126. data/5.0-Upgrade.md +0 -56
  127. data/6.0-Upgrade.md +0 -72
  128. data/COMM-LICENSE +0 -97
  129. data/Ent-2.0-Upgrade.md +0 -37
  130. data/Ent-Changes.md +0 -256
  131. data/Gemfile +0 -24
  132. data/Gemfile.lock +0 -199
  133. data/LICENSE +0 -9
  134. data/Pro-2.0-Upgrade.md +0 -138
  135. data/Pro-3.0-Upgrade.md +0 -44
  136. data/Pro-4.0-Upgrade.md +0 -35
  137. data/Pro-5.0-Upgrade.md +0 -25
  138. data/Pro-Changes.md +0 -776
  139. data/Rakefile +0 -10
  140. data/code_of_conduct.md +0 -50
  141. data/lib/generators/sidekiq/worker_generator.rb +0 -57
  142. data/lib/sidekiq/delay.rb +0 -41
  143. data/lib/sidekiq/exception_handler.rb +0 -27
  144. data/lib/sidekiq/extensions/action_mailer.rb +0 -47
  145. data/lib/sidekiq/extensions/active_record.rb +0 -42
  146. data/lib/sidekiq/extensions/class_methods.rb +0 -42
  147. data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
--- data/lib/sidekiq/worker.rb
+++ data/lib/sidekiq/job.rb
@@ -4,11 +4,12 @@ require "sidekiq/client"
 
 module Sidekiq
   ##
-  # Include this module in your worker class and you can easily create
+  # Include this module in your job class and you can easily create
   # asynchronous jobs:
   #
-  #   class HardWorker
-  #     include Sidekiq::Worker
+  #   class HardJob
+  #     include Sidekiq::Job
+  #     sidekiq_options queue: 'critical', retry: 5
   #
   #     def perform(*args)
   #       # do some work
@@ -17,10 +18,30 @@ module Sidekiq
   #
   # Then in your Rails app, you can do this:
   #
-  #   HardWorker.perform_async(1, 2, 3)
+  #   HardJob.perform_async(1, 2, 3)
   #
   # Note that perform_async is a class method, perform is an instance method.
-  module Worker
+  #
+  # Sidekiq::Job also includes several APIs to provide compatibility with
+  # ActiveJob.
+  #
+  #   class SomeJob
+  #     include Sidekiq::Job
+  #     queue_as :critical
+  #
+  #     def perform(...)
+  #     end
+  #   end
+  #
+  #   SomeJob.set(wait_until: 1.hour).perform_async(123)
+  #
+  # Note that arguments passed to the job must still obey Sidekiq's
+  # best practice for simple, JSON-native data types. Sidekiq will not
+  # implement ActiveJob's more complex argument serialization. For
+  # this reason, we don't implement `perform_later` as our call semantics
+  # are very different.
+  #
+  module Job
     ##
     # The Options module is extracted so we can include it in ActiveJob::Base
     # and allow native AJs to configure Sidekiq features/internals.
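The module documentation above covers the core enqueueing API. A hedged sketch of how it reads in practice (HardJob and its arguments are illustrative, not part of the gem):

class HardJob
  include Sidekiq::Job
  sidekiq_options queue: "critical", retry: 5

  def perform(user_id, note)
    # arguments arrive as the JSON-native values they were enqueued with
  end
end

HardJob.perform_async(123, "welcome")                # enqueue for immediate execution
HardJob.perform_in(300, 123, "reminder")             # run roughly five minutes from now
HardJob.set(queue: "low", wait: 30).perform_async(1) # ActiveJob-style set / wait options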
@@ -36,11 +57,11 @@ module Sidekiq
       ACCESSOR_MUTEX = Mutex.new
 
       ##
-      # Allows customization for this type of Worker.
+      # Allows customization for this type of Job.
       # Legal options:
       #
       #   queue - name of queue to use for this job type, default *default*
-      #   retry - enable retries for this Worker in case of error during execution,
+      #   retry - enable retries for this Job in case of error during execution,
       #     *true* to use the default or *Integer* count
       #   backtrace - whether to save any error backtrace in the retry payload to display in web UI,
       #     can be true, false or an integer number of lines to save, default *false*
@@ -48,8 +69,8 @@ module Sidekiq
       # In practice, any option is allowed. This is the main mechanism to configure the
       # options for a specific job.
       def sidekiq_options(opts = {})
-        opts = Hash[opts.map { |k, v| [k.to_s, v] }] # stringify
-        self.sidekiq_options_hash = get_sidekiq_options.merge(Hash[opts.map { |k, v| [k.to_s, v] }])
+        opts = opts.transform_keys(&:to_s) # stringify
+        self.sidekiq_options_hash = get_sidekiq_options.merge(opts)
       end
 
       def sidekiq_retry_in(&block)
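Since sidekiq_options now stringifies keys via transform_keys, symbol and string keys are interchangeable. A small sketch (class and values are illustrative):

class ReportJob
  include Sidekiq::Job
  # symbol keys are converted to strings before being merged into the options hash
  sidekiq_options queue: "reports", retry: 3, backtrace: 20
end

ReportJob.get_sidekiq_options["queue"]  # => "reports"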
@@ -61,7 +82,7 @@ module Sidekiq
       end
 
       def get_sidekiq_options # :nodoc:
-        self.sidekiq_options_hash ||= Sidekiq.default_worker_options
+        self.sidekiq_options_hash ||= Sidekiq.default_job_options
       end
 
       def sidekiq_class_attribute(*attrs)
@@ -135,7 +156,7 @@ module Sidekiq
     attr_accessor :jid
 
     def self.included(base)
-      raise ArgumentError, "Sidekiq::Worker cannot be included in an ActiveJob: #{base.name}" if base.ancestors.any? { |c| c.name == "ActiveJob::Base" }
+      raise ArgumentError, "Sidekiq::Job cannot be included in an ActiveJob: #{base.name}" if base.ancestors.any? { |c| c.name == "ActiveJob::Base" }
 
       base.include(Options)
       base.extend(ClassMethods)
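get_sidekiq_options now falls back to Sidekiq.default_job_options rather than the old default_worker_options. Assuming the matching writer on the Sidekiq module, gem-wide defaults might be set in an initializer like this (values illustrative):

# config/initializers/sidekiq.rb (assumed location)
Sidekiq.default_job_options = {"backtrace" => true, "retry" => 3}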
@@ -147,49 +168,114 @@ module Sidekiq
 
     # This helper class encapsulates the set options for `set`, e.g.
     #
-    #     SomeWorker.set(queue: 'foo').perform_async(....)
+    #     SomeJob.set(queue: 'foo').perform_async(....)
     #
     class Setter
+      include Sidekiq::JobUtil
+
       def initialize(klass, opts)
         @klass = klass
-        @opts = opts
+        # NB: the internal hash always has stringified keys
+        @opts = opts.transform_keys(&:to_s)
+
+        # ActiveJob compatibility
+        interval = @opts.delete("wait_until") || @opts.delete("wait")
+        at(interval) if interval
       end
 
       def set(options)
-        @opts.merge!(options)
+        hash = options.transform_keys(&:to_s)
+        interval = hash.delete("wait_until") || @opts.delete("wait")
+        @opts.merge!(hash)
+        at(interval) if interval
         self
       end
 
       def perform_async(*args)
-        @klass.client_push(@opts.merge("args" => args, "class" => @klass))
+        if @opts["sync"] == true
+          perform_inline(*args)
+        else
+          @klass.client_push(@opts.merge("args" => args, "class" => @klass))
+        end
+      end
+
+      # Explicit inline execution of a job. Returns nil if the job did not
+      # execute, true otherwise.
+      def perform_inline(*args)
+        raw = @opts.merge("args" => args, "class" => @klass)
+
+        # validate and normalize payload
+        item = normalize_item(raw)
+        queue = item["queue"]
+
+        # run client-side middleware
+        cfg = Sidekiq.default_configuration
+        result = cfg.client_middleware.invoke(item["class"], item, queue, cfg.redis_pool) do
+          item
+        end
+        return nil unless result
+
+        # round-trip the payload via JSON
+        msg = Sidekiq.load_json(Sidekiq.dump_json(item))
+
+        # prepare the job instance
+        klass = Object.const_get(msg["class"])
+        job = klass.new
+        job.jid = msg["jid"]
+        job.bid = msg["bid"] if job.respond_to?(:bid)
+
+        # run the job through server-side middleware
+        result = cfg.server_middleware.invoke(job, msg, msg["queue"]) do
+          # perform it
+          job.perform(*msg["args"])
+          true
+        end
+        return nil unless result
+        # jobs do not return a result. they should store any
+        # modified state.
+        true
+      end
+      alias_method :perform_sync, :perform_inline
+
+      def perform_bulk(args, batch_size: 1_000)
+        client = @klass.build_client
+        client.push_bulk(@opts.merge("class" => @klass, "args" => args, :batch_size => batch_size))
       end
 
       # +interval+ must be a timestamp, numeric or something that acts
       #   numeric (like an activesupport time interval).
       def perform_in(interval, *args)
+        at(interval).perform_async(*args)
+      end
+      alias_method :perform_at, :perform_in
+
+      private
+
+      def at(interval)
         int = interval.to_f
         now = Time.now.to_f
-        ts = (int < 1_000_000_000 ? now + int : int)
-
-        payload = @opts.merge("class" => @klass, "args" => args)
+        ts = ((int < 1_000_000_000) ? now + int : int)
         # Optimization to enqueue something now that is scheduled to go out now or in the past
-        payload["at"] = ts if ts > now
-        @klass.client_push(payload)
+        @opts["at"] = ts if ts > now
+        self
       end
-      alias_method :perform_at, :perform_in
     end
 
     module ClassMethods
       def delay(*args)
-        raise ArgumentError, "Do not call .delay on a Sidekiq::Worker class, call .perform_async"
+        raise ArgumentError, "Do not call .delay on a Sidekiq::Job class, call .perform_async"
       end
 
       def delay_for(*args)
-        raise ArgumentError, "Do not call .delay_for on a Sidekiq::Worker class, call .perform_in"
+        raise ArgumentError, "Do not call .delay_for on a Sidekiq::Job class, call .perform_in"
       end
 
       def delay_until(*args)
-        raise ArgumentError, "Do not call .delay_until on a Sidekiq::Worker class, call .perform_at"
+        raise ArgumentError, "Do not call .delay_until on a Sidekiq::Job class, call .perform_at"
+      end
+
+      def queue_as(q)
+        sidekiq_options("queue" => q.to_s)
       end
 
       def set(options)
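The expanded Setter above is the object returned by .set; a hedged sketch of the chaining and the new inline execution path (job name and arguments are illustrative):

# set returns the Setter, so options can be stacked before enqueueing
SomeJob.set(queue: "low", wait: 600).perform_async(42)

# run the job in-process through client and server middleware,
# e.g. in tests or console sessions
SomeJob.set(queue: "low").perform_inline(42)  # alias: perform_sync
SomeJob.set(sync: true).perform_async(42)     # same effect via the "sync" option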
@@ -197,7 +283,37 @@ module Sidekiq
       end
 
       def perform_async(*args)
-        client_push("class" => self, "args" => args)
+        Setter.new(self, {}).perform_async(*args)
+      end
+
+      # Inline execution of job's perform method after passing through Sidekiq.client_middleware and Sidekiq.server_middleware
+      def perform_inline(*args)
+        Setter.new(self, {}).perform_inline(*args)
+      end
+      alias_method :perform_sync, :perform_inline
+
+      ##
+      # Push a large number of jobs to Redis, while limiting the batch of
+      # each job payload to 1,000. This method helps cut down on the number
+      # of round trips to Redis, which can increase the performance of enqueueing
+      # large numbers of jobs.
+      #
+      # +items+ must be an Array of Arrays.
+      #
+      # For finer-grained control, use `Sidekiq::Client.push_bulk` directly.
+      #
+      # Example (3 Redis round trips):
+      #
+      #   SomeJob.perform_async(1)
+      #   SomeJob.perform_async(2)
+      #   SomeJob.perform_async(3)
+      #
+      # Would instead become (1 Redis round trip):
+      #
+      #   SomeJob.perform_bulk([[1], [2], [3]])
+      #
+      def perform_bulk(*args, **kwargs)
+        Setter.new(self, {}).perform_bulk(*args, **kwargs)
       end
 
       # +interval+ must be a timestamp, numeric or something that acts
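The perform_bulk docs above use literal argument arrays; in practice the argument list is usually built from a collection. A sketch assuming a hypothetical User model and WelcomeJob class:

# one element per job; each element is that job's positional argument list
args = User.where(welcomed: false).pluck(:id).map { |id| [id] }

WelcomeJob.perform_bulk(args)                   # enqueued in slices of 1,000 per round trip
WelcomeJob.perform_bulk(args, batch_size: 500)  # smaller slices if payloads are large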
@@ -205,7 +321,7 @@ module Sidekiq
       def perform_in(interval, *args)
         int = interval.to_f
         now = Time.now.to_f
-        ts = (int < 1_000_000_000 ? now + int : int)
+        ts = ((int < 1_000_000_000) ? now + int : int)
 
         item = {"class" => self, "args" => args}
 
@@ -217,11 +333,11 @@ module Sidekiq
       alias_method :perform_at, :perform_in
 
       ##
-      # Allows customization for this type of Worker.
+      # Allows customization for this type of Job.
       # Legal options:
       #
-      #   queue - use a named queue for this Worker, default 'default'
-      #   retry - enable the RetryJobs middleware for this Worker, *true* to use the default
+      #   queue - use a named queue for this Job, default 'default'
+      #   retry - enable the RetryJobs middleware for this Job, *true* to use the default
       #     or *Integer* count
       #   backtrace - whether to save any error backtrace in the retry payload to display in web UI,
       #     can be true, false or an integer number of lines to save, default *false*
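The 1_000_000_000 comparison in perform_in above is how Sidekiq tells a relative delay from an absolute epoch timestamp: anything below one billion is treated as an offset from now. A short sketch:

SomeJob.perform_in(900, "arg")              # 900 < 1_000_000_000, so run ~15 minutes from now
SomeJob.perform_at(Time.now + 3600, "arg")  # Time#to_f is an epoch float, so run at that moment
SomeJob.perform_in(0, "arg")                # ts is not in the future, so it enqueues immediately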
@@ -234,13 +350,24 @@ module Sidekiq
       end
 
       def client_push(item) # :nodoc:
-        pool = Thread.current[:sidekiq_via_pool] || get_sidekiq_options["pool"] || Sidekiq.redis_pool
-        # stringify
-        item.keys.each do |key|
-          item[key.to_s] = item.delete(key)
+        raise ArgumentError, "Job payloads should contain no Symbols: #{item}" if item.any? { |k, v| k.is_a?(::Symbol) }
+
+        # allow the user to dynamically re-target jobs to another shard using the "pool" attribute
+        #   FooJob.set(pool: SOME_POOL).perform_async
+        old = Thread.current[:sidekiq_redis_pool]
+        pool = item.delete("pool")
+        Thread.current[:sidekiq_redis_pool] = pool if pool
+        begin
+          build_client.push(item)
+        ensure
+          Thread.current[:sidekiq_redis_pool] = old
         end
+      end
 
-        Sidekiq::Client.new(pool).push(item)
+      def build_client # :nodoc:
+        pool = Thread.current[:sidekiq_redis_pool] || get_sidekiq_options["pool"] || Sidekiq.default_configuration.redis_pool
+        client_class = get_sidekiq_options["client_class"] || Sidekiq::Client
+        client_class.new(pool: pool)
       end
     end
   end
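client_push above honors a per-job "pool" option so an enqueue can be re-targeted at another Redis shard, as its own comment shows with FooJob.set(pool: SOME_POOL). A hedged sketch, assuming Sidekiq::RedisConnection.create is still the way to build such a pool:

# a ConnectionPool pointing at a second Redis (URL and size are illustrative)
SOME_POOL = Sidekiq::RedisConnection.create(url: ENV["ANALYTICS_REDIS_URL"], size: 5)

# pushed to SOME_POOL instead of the default shard configured for the process
FooJob.set(pool: SOME_POOL).perform_async(123)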
--- data/lib/sidekiq/job_logger.rb
+++ data/lib/sidekiq/job_logger.rb
@@ -2,7 +2,7 @@
 
 module Sidekiq
   class JobLogger
-    def initialize(logger = Sidekiq.logger)
+    def initialize(logger)
       @logger = logger
     end
 
@@ -12,46 +12,34 @@ module Sidekiq
 
       yield
 
-      with_elapsed_time_context(start) do
-        @logger.info("done")
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("done")
     rescue Exception
-      with_elapsed_time_context(start) do
-        @logger.info("fail")
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("fail")
 
       raise
     end
 
     def prepare(job_hash, &block)
-      level = job_hash["log_level"]
-      if level
-        @logger.log_at(level) do
-          Sidekiq::Context.with(job_hash_context(job_hash), &block)
-        end
-      else
-        Sidekiq::Context.with(job_hash_context(job_hash), &block)
-      end
-    end
-
-    def job_hash_context(job_hash)
       # If we're using a wrapper class, like ActiveJob, use the "wrapped"
       # attribute to expose the underlying thing.
       h = {
-        class: job_hash["wrapped"] || job_hash["class"],
-        jid: job_hash["jid"],
+        class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
+        jid: job_hash["jid"]
       }
-      h[:bid] = job_hash["bid"] if job_hash["bid"]
-      h[:tags] = job_hash["tags"] if job_hash["tags"]
-      h
-    end
-
-    def with_elapsed_time_context(start, &block)
-      Sidekiq::Context.with(elapsed_time_context(start), &block)
-    end
+      h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
+      h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")
 
-    def elapsed_time_context(start)
-      {elapsed: elapsed(start).to_s}
+      Thread.current[:sidekiq_context] = h
+      level = job_hash["log_level"]
+      if level && @logger.respond_to?(:log_at)
+        @logger.log_at(level, &block)
+      else
+        yield
+      end
+    ensure
+      Thread.current[:sidekiq_context] = nil
     end
 
     private
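prepare above stashes the job hash in Thread.current[:sidekiq_context] and, when the logger responds to log_at, temporarily switches to the job's "log_level". Assuming that option is set through sidekiq_options like any other job option, a job can opt into verbose logging:

class NoisyJob
  include Sidekiq::Job
  # rides along in the job payload and is picked up by JobLogger#prepare
  sidekiq_options log_level: :debug

  def perform(*)
    logger.debug "visible because this job runs at :debug, whatever the global level"
  end
end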
--- data/lib/sidekiq/job_retry.rb
+++ data/lib/sidekiq/job_retry.rb
@@ -1,10 +1,8 @@
 # frozen_string_literal: true
 
-require "sidekiq/scheduled"
-require "sidekiq/api"
-
 require "zlib"
 require "base64"
+require "sidekiq/component"
 
 module Sidekiq
   ##
@@ -25,18 +23,19 @@ module Sidekiq
   #
   # A job looks like:
   #
-  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
   #
   # The 'retry' option also accepts a number (in place of 'true'):
   #
-  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
   #
   # The job will be retried this number of times before giving up. (If simply
   # 'true', Sidekiq retries 25 times)
   #
-  # We'll add a bit more data to the job to support retries:
+  # Relevant options for job retries:
   #
-  #  * 'queue' - the queue to use
+  #  * 'queue' - the queue for the initial job
+  #  * 'retry_queue' - if job retries should be pushed to a different (e.g. lower priority) queue
   #  * 'retry_count' - number of times we've retried so far.
   #  * 'error_message' - the message from the exception
   #  * 'error_class' - the exception class
@@ -50,30 +49,34 @@ module Sidekiq
   # The default number of retries is 25 which works out to about 3 weeks
   # You can change the default maximum number of retries in your initializer:
   #
-  #   Sidekiq.options[:max_retries] = 7
+  #   Sidekiq.default_configuration[:max_retries] = 7
   #
-  # or limit the number of retries for a particular worker with:
+  # or limit the number of retries for a particular job and send retries to
+  # a low priority queue with:
   #
-  #   class MyWorker
-  #     include Sidekiq::Worker
-  #     sidekiq_options :retry => 10
+  #   class MyJob
+  #     include Sidekiq::Job
+  #     sidekiq_options retry: 10, retry_queue: 'low'
   #   end
   #
   class JobRetry
     class Handled < ::RuntimeError; end
+
     class Skip < Handled; end
 
-    include Sidekiq::Util
+    include Sidekiq::Component
 
     DEFAULT_MAX_RETRY_ATTEMPTS = 25
 
-    def initialize(options = {})
-      @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+    def initialize(capsule)
+      @config = @capsule = capsule
+      @max_retries = Sidekiq.default_configuration[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
+      @backtrace_cleaner = Sidekiq.default_configuration[:backtrace_cleaner]
    end
 
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
-    # require the worker to be instantiated.
+    # require the job to be instantiated.
     def global(jobstr, queue)
       yield
     rescue Handled => ex
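The retry documentation above covers the global and per-job knobs; a short sketch combining them with a retries-exhausted callback (the handler body is illustrative):

class ImportJob
  include Sidekiq::Job
  # give up after 10 attempts and push the retries themselves onto the 'low' queue
  sidekiq_options retry: 10, retry_queue: "low"

  # runs once the retry budget is spent, just before the job goes to the dead set
  sidekiq_retries_exhausted do |msg, exception|
    Sidekiq.logger.warn "#{msg["class"]} #{msg["jid"]} exhausted: #{exception.message}"
  end

  def perform(source_id)
    # ...
  end
end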
@@ -87,9 +90,9 @@ module Sidekiq
 
       msg = Sidekiq.load_json(jobstr)
       if msg["retry"]
-        attempt_retry(nil, msg, queue, e)
+        process_retry(nil, msg, queue, e)
       else
-        Sidekiq.death_handlers.each do |handler|
+        @capsule.config.death_handlers.each do |handler|
           handler.call(msg, e)
         rescue => handler_ex
           handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
@@ -100,14 +103,14 @@ module Sidekiq
       end
     end
 
     # The local retry support means that any errors that occur within
-    # this block can be associated with the given worker instance.
+    # this block can be associated with the given job instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
     #
     # Note that any exception from the block is wrapped in the Skip
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(worker, jobstr, queue)
+    def local(jobinst, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -120,11 +123,11 @@ module Sidekiq
 
       msg = Sidekiq.load_json(jobstr)
       if msg["retry"].nil?
-        msg["retry"] = worker.class.get_sidekiq_options["retry"]
+        msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
       end
 
       raise e unless msg["retry"]
-      attempt_retry(worker, msg, queue, e)
+      process_retry(jobinst, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
       raise Skip
@@ -132,10 +135,10 @@ module Sidekiq
 
     private
 
-    # Note that +worker+ can be nil here if an error is raised before we can
-    # instantiate the worker instance. All access must be guarded and
+    # Note that +jobinst+ can be nil here if an error is raised before we can
+    # instantiate the job instance. All access must be guarded and
     # best effort.
-    def attempt_retry(worker, msg, queue, exception)
+    def process_retry(jobinst, msg, queue, exception)
       max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
       msg["queue"] = (msg["retry_queue"] || queue)
@@ -157,41 +160,89 @@ module Sidekiq
       end
 
       if msg["backtrace"]
+        backtrace = @backtrace_cleaner.call(exception.backtrace)
         lines = if msg["backtrace"] == true
-          exception.backtrace
+          backtrace
         else
-          exception.backtrace[0...msg["backtrace"].to_i]
+          backtrace[0...msg["backtrace"].to_i]
         end
 
         msg["error_backtrace"] = compress_backtrace(lines)
       end
 
-      if count < max_retry_attempts
-        delay = delay_for(worker, count, exception)
-        # Logging here can break retries if the logging device raises ENOSPC #3979
-        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
-        retry_at = Time.now.to_f + delay
-        payload = Sidekiq.dump_json(msg)
-        Sidekiq.redis do |conn|
-          conn.zadd("retry", retry_at.to_s, payload)
+      return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
+
+      rf = msg["retry_for"]
+      return retries_exhausted(jobinst, msg, exception) if rf && ((msg["failed_at"] + rf) < Time.now.to_f)
+
+      strategy, delay = delay_for(jobinst, count, exception, msg)
+      case strategy
+      when :discard
+        return # poof!
+      when :kill
+        return retries_exhausted(jobinst, msg, exception)
+      end
+
+      # Logging here can break retries if the logging device raises ENOSPC #3979
+      # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+      jitter = rand(10) * (count + 1)
+      retry_at = Time.now.to_f + delay + jitter
+      payload = Sidekiq.dump_json(msg)
+      redis do |conn|
+        conn.zadd("retry", retry_at.to_s, payload)
+      end
+    end
+
+    # returns (strategy, seconds)
+    def delay_for(jobinst, count, exception, msg)
+      rv = begin
+        # sidekiq_retry_in can return two different things:
+        # 1. When to retry next, as an integer of seconds
+        # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
+        block = jobinst&.sidekiq_retry_in_block
+
+        # the sidekiq_retry_in_block can be defined in a wrapped class (ActiveJob for instance)
+        unless msg["wrapped"].nil?
+          wrapped = Object.const_get(msg["wrapped"])
+          block = wrapped.respond_to?(:sidekiq_retry_in_block) ? wrapped.sidekiq_retry_in_block : nil
         end
-      else
-        # Goodbye dear message, you (re)tried your best I'm sure.
-        retries_exhausted(worker, msg, exception)
+        block&.call(count, exception, msg)
+      rescue Exception => e
+        handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
+        nil
       end
+
+      rv = rv.to_i if rv.respond_to?(:to_i)
+      delay = (count**4) + 15
+      if Integer === rv && rv > 0
+        delay = rv
+      elsif rv == :discard
+        return [:discard, nil] # do nothing, job goes poof
+      elsif rv == :kill
+        return [:kill, nil]
+      end
+
+      [:default, delay]
     end
 
-    def retries_exhausted(worker, msg, exception)
-      begin
-        block = worker&.sidekiq_retries_exhausted_block
+    def retries_exhausted(jobinst, msg, exception)
+      rv = begin
+        block = jobinst&.sidekiq_retries_exhausted_block
+
+        # the sidekiq_retries_exhausted_block can be defined in a wrapped class (ActiveJob for instance)
+        unless msg["wrapped"].nil?
+          wrapped = Object.const_get(msg["wrapped"])
+          block = wrapped.respond_to?(:sidekiq_retries_exhausted_block) ? wrapped.sidekiq_retries_exhausted_block : nil
+        end
         block&.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
+      return if rv == :discard # poof!
       send_to_morgue(msg) unless msg["dead"] == false
 
-      Sidekiq.death_handlers.each do |handler|
+      @capsule.config.death_handlers.each do |handler|
         handler.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling death handler", job: msg})
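delay_for above lets a sidekiq_retry_in block return either a delay in seconds or a routing symbol (:discard, :kill, :default); with the default strategy the delay grows as (count**4) + 15 seconds (15, 16, 31, 96, 271, ...) plus rand(10) * (count + 1) jitter. A hedged sketch (the exception classes are illustrative):

class SyncJob
  include Sidekiq::Job

  # count = retries so far, msg = the job hash
  sidekiq_retry_in do |count, exception, msg|
    case exception
    when RateLimited   then 60 * (count + 1)  # back off linearly when throttled
    when BadInput      then :discard          # drop the job silently
    when TenantDeleted then :kill             # send straight to the dead set
    else :default                             # fall back to the polynomial schedule above
    end
  end

  def perform(tenant_id)
    # ...
  end
end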
@@ -201,7 +252,15 @@ module Sidekiq
     def send_to_morgue(msg)
       logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
-      DeadSet.new.kill(payload, notify_failure: false)
+      now = Time.now.to_f
+
+      redis do |conn|
+        conn.multi do |xa|
+          xa.zadd("dead", now.to_s, payload)
+          xa.zremrangebyscore("dead", "-inf", now - @capsule.config[:dead_timeout_in_seconds])
+          xa.zremrangebyrank("dead", 0, - @capsule.config[:dead_max_jobs])
+        end
+      end
     end
 
     def retry_attempts_from(msg_retry, default)
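send_to_morgue now writes straight to the "dead" sorted set and trims it using the dead_timeout_in_seconds and dead_max_jobs settings, and death handlers still fire for every dead job. A hedged sketch of tuning both in an initializer (the error-tracker call is illustrative; the defaults are roughly six months and 10,000 jobs):

Sidekiq.configure_server do |config|
  # keep dead jobs for 90 days, up to 50,000 entries
  config[:dead_timeout_in_seconds] = 90 * 24 * 60 * 60
  config[:dead_max_jobs] = 50_000

  # called with the job hash and the exception when a job finally dies
  config.death_handlers << ->(job, ex) { ErrorTracker.notify("#{job["class"]} died", ex) }
end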
@@ -212,26 +271,6 @@ module Sidekiq
       end
     end
 
-    def delay_for(worker, count, exception)
-      if worker&.sidekiq_retry_in_block
-        custom_retry_in = retry_in(worker, count, exception).to_i
-        return custom_retry_in if custom_retry_in > 0
-      end
-      seconds_to_delay(count)
-    end
-
-    # delayed_job uses the same basic formula
-    def seconds_to_delay(count)
-      (count**4) + 15 + (rand(30) * (count + 1))
-    end
-
-    def retry_in(worker, count, exception)
-      worker.sidekiq_retry_in_block.call(count, exception)
-    rescue Exception => e
-      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
-      nil
-    end
-
     def exception_caused_by_shutdown?(e, checked_causes = [])
       return false unless e.cause