sidekiq 6.0.0 → 6.1.2

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of sidekiq might be problematic. See the release notes for more details.

Files changed (71)
  1. checksums.yaml +4 -4
  2. data/.github/ISSUE_TEMPLATE/bug_report.md +20 -0
  3. data/.github/workflows/ci.yml +41 -0
  4. data/6.0-Upgrade.md +3 -1
  5. data/Changes.md +163 -1
  6. data/Ent-Changes.md +33 -2
  7. data/Gemfile +2 -2
  8. data/Gemfile.lock +109 -113
  9. data/Pro-Changes.md +39 -2
  10. data/README.md +4 -6
  11. data/bin/sidekiq +26 -2
  12. data/bin/sidekiqload +8 -4
  13. data/bin/sidekiqmon +4 -5
  14. data/lib/generators/sidekiq/worker_generator.rb +11 -1
  15. data/lib/sidekiq/api.rb +130 -94
  16. data/lib/sidekiq/cli.rb +40 -24
  17. data/lib/sidekiq/client.rb +33 -12
  18. data/lib/sidekiq/extensions/action_mailer.rb +3 -2
  19. data/lib/sidekiq/extensions/active_record.rb +4 -3
  20. data/lib/sidekiq/extensions/class_methods.rb +5 -4
  21. data/lib/sidekiq/fetch.rb +26 -26
  22. data/lib/sidekiq/job_logger.rb +12 -4
  23. data/lib/sidekiq/job_retry.rb +23 -10
  24. data/lib/sidekiq/launcher.rb +35 -10
  25. data/lib/sidekiq/logger.rb +108 -12
  26. data/lib/sidekiq/manager.rb +4 -4
  27. data/lib/sidekiq/middleware/chain.rb +12 -3
  28. data/lib/sidekiq/monitor.rb +3 -18
  29. data/lib/sidekiq/paginator.rb +7 -2
  30. data/lib/sidekiq/processor.rb +22 -24
  31. data/lib/sidekiq/rails.rb +16 -18
  32. data/lib/sidekiq/redis_connection.rb +21 -13
  33. data/lib/sidekiq/scheduled.rb +13 -12
  34. data/lib/sidekiq/sd_notify.rb +149 -0
  35. data/lib/sidekiq/systemd.rb +24 -0
  36. data/lib/sidekiq/testing.rb +13 -1
  37. data/lib/sidekiq/util.rb +0 -2
  38. data/lib/sidekiq/version.rb +1 -1
  39. data/lib/sidekiq/web/application.rb +23 -24
  40. data/lib/sidekiq/web/csrf_protection.rb +158 -0
  41. data/lib/sidekiq/web/helpers.rb +25 -16
  42. data/lib/sidekiq/web/router.rb +2 -4
  43. data/lib/sidekiq/web.rb +16 -8
  44. data/lib/sidekiq/worker.rb +8 -11
  45. data/lib/sidekiq.rb +22 -8
  46. data/sidekiq.gemspec +3 -4
  47. data/web/assets/javascripts/application.js +25 -27
  48. data/web/assets/javascripts/dashboard.js +2 -2
  49. data/web/assets/stylesheets/application-dark.css +143 -0
  50. data/web/assets/stylesheets/application.css +16 -6
  51. data/web/locales/de.yml +14 -2
  52. data/web/locales/en.yml +2 -0
  53. data/web/locales/fr.yml +2 -2
  54. data/web/locales/ja.yml +2 -0
  55. data/web/locales/lt.yml +83 -0
  56. data/web/locales/pl.yml +4 -4
  57. data/web/locales/ru.yml +4 -0
  58. data/web/locales/vi.yml +83 -0
  59. data/web/views/_job_info.erb +2 -1
  60. data/web/views/busy.erb +6 -3
  61. data/web/views/dead.erb +2 -2
  62. data/web/views/layout.erb +1 -0
  63. data/web/views/morgue.erb +5 -2
  64. data/web/views/queue.erb +10 -1
  65. data/web/views/queues.erb +9 -1
  66. data/web/views/retries.erb +5 -2
  67. data/web/views/retry.erb +2 -2
  68. data/web/views/scheduled.erb +5 -2
  69. metadata +21 -29
  70. data/.circleci/config.yml +0 -61
  71. data/.github/issue_template.md +0 -11
data/lib/sidekiq/cli.rb CHANGED
@@ -33,17 +33,21 @@ module Sidekiq
33
33
  # Code within this method is not tested because it alters
34
34
  # global process state irreversibly. PRs which improve the
35
35
  # test coverage of Sidekiq::CLI are welcomed.
36
- def run
37
- boot_system
36
+ def run(boot_app: true)
37
+ boot_application if boot_app
38
+
38
39
  if environment == "development" && $stdout.tty? && Sidekiq.log_formatter.is_a?(Sidekiq::Logger::Formatters::Pretty)
39
40
  print_banner
40
41
  end
42
+ logger.info "Booted Rails #{::Rails.version} application in #{environment} environment" if rails_app?
41
43
 
42
44
  self_read, self_write = IO.pipe
43
45
  sigs = %w[INT TERM TTIN TSTP]
46
+ # USR1 and USR2 don't work on the JVM
47
+ sigs << "USR2" if Sidekiq.pro? && !jruby?
44
48
  sigs.each do |sig|
45
49
  trap sig do
46
- self_write.write("#{sig}\n")
50
+ self_write.puts(sig)
47
51
  end
48
52
  rescue ArgumentError
49
53
  puts "Signal #{sig} not supported"
@@ -51,12 +55,12 @@ module Sidekiq
51
55
 
52
56
  logger.info "Running in #{RUBY_DESCRIPTION}"
53
57
  logger.info Sidekiq::LICENSE
54
- logger.info "Upgrade to Sidekiq Pro for more features and support: http://sidekiq.org" unless defined?(::Sidekiq::Pro)
58
+ logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
55
59
 
56
60
  # touch the connection pool so it is created before we
57
61
  # fire startup and start multithreading.
58
62
  ver = Sidekiq.redis_info["redis_version"]
59
- raise "You are using Redis v#{ver}, Sidekiq requires Redis v4.0.0 or greater" if ver < "4"
63
+ raise "You are connecting to Redis v#{ver}, Sidekiq requires Redis v4.0.0 or greater" if ver < "4"
60
64
 
61
65
  # Since the user can pass us a connection pool explicitly in the initializer, we
62
66
  # need to verify the size is large enough or else Sidekiq's performance is dramatically slowed.
@@ -160,17 +164,14 @@ module Sidekiq
160
164
  Sidekiq.logger.warn "<no backtrace available>"
161
165
  end
162
166
  end
163
- },
167
+ }
164
168
  }
169
+ UNHANDLED_SIGNAL_HANDLER = ->(cli) { Sidekiq.logger.info "No signal handler registered, ignoring" }
170
+ SIGNAL_HANDLERS.default = UNHANDLED_SIGNAL_HANDLER
165
171
 
166
172
  def handle_signal(sig)
167
173
  Sidekiq.logger.debug "Got #{sig} signal"
168
- handy = SIGNAL_HANDLERS[sig]
169
- if handy
170
- handy.call(self)
171
- else
172
- Sidekiq.logger.info { "No signal handler for #{sig}" }
173
- end
174
+ SIGNAL_HANDLERS[sig].call(self)
174
175
  end
175
176
 
176
177
  private
@@ -182,7 +183,11 @@ module Sidekiq
182
183
  end
183
184
 
184
185
  def set_environment(cli_env)
185
- @environment = cli_env || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
186
+ # See #984 for discussion.
187
+ # APP_ENV is now the preferred ENV term since it is not tech-specific.
188
+ # Both Sinatra 2.0+ and Sidekiq support this term.
189
+ # RAILS_ENV and RACK_ENV are there for legacy support.
190
+ @environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
186
191
  end
187
192
 
188
193
  def symbolize_keys_deep!(hash)
@@ -204,7 +209,7 @@ module Sidekiq
204
209
 
205
210
  # check config file presence
206
211
  if opts[:config_file]
207
- if opts[:config_file] && !File.exist?(opts[:config_file])
212
+ unless File.exist?(opts[:config_file])
208
213
  raise ArgumentError, "No such file #{opts[:config_file]}"
209
214
  end
210
215
  else
@@ -224,8 +229,7 @@ module Sidekiq
224
229
  opts = parse_config(opts[:config_file]).merge(opts) if opts[:config_file]
225
230
 
226
231
  # set defaults
227
- opts[:queues] = Array(opts[:queues]) << "default" if opts[:queues].nil? || opts[:queues].empty?
228
- opts[:strict] = true if opts[:strict].nil?
232
+ opts[:queues] = ["default"] if opts[:queues].nil?
229
233
  opts[:concurrency] = Integer(ENV["RAILS_MAX_THREADS"]) if opts[:concurrency].nil? && ENV["RAILS_MAX_THREADS"]
230
234
 
231
235
  # merge with defaults
@@ -236,7 +240,7 @@ module Sidekiq
236
240
  Sidekiq.options
237
241
  end
238
242
 
239
- def boot_system
243
+ def boot_application
240
244
  ENV["RACK_ENV"] = ENV["RAILS_ENV"] = environment
241
245
 
242
246
  if File.directory?(options[:require])
@@ -283,8 +287,13 @@ module Sidekiq
283
287
 
284
288
  def parse_options(argv)
285
289
  opts = {}
290
+ @parser = option_parser(opts)
291
+ @parser.parse!(argv)
292
+ opts
293
+ end
286
294
 
287
- @parser = OptionParser.new { |o|
295
+ def option_parser(opts)
296
+ parser = OptionParser.new { |o|
288
297
  o.on "-c", "--concurrency INT", "processor threads to use" do |arg|
289
298
  opts[:concurrency] = Integer(arg)
290
299
  end
@@ -336,15 +345,13 @@ module Sidekiq
336
345
  end
337
346
  }
338
347
 
339
- @parser.banner = "sidekiq [options]"
340
- @parser.on_tail "-h", "--help", "Show help" do
341
- logger.info @parser
348
+ parser.banner = "sidekiq [options]"
349
+ parser.on_tail "-h", "--help", "Show help" do
350
+ logger.info parser
342
351
  die 1
343
352
  end
344
353
 
345
- @parser.parse!(argv)
346
-
347
- opts
354
+ parser
348
355
  end
349
356
 
350
357
  def initialize_logger
@@ -361,6 +368,8 @@ module Sidekiq
361
368
  end
362
369
 
363
370
  opts = opts.merge(opts.delete(environment.to_sym) || {})
371
+ opts.delete(:strict)
372
+
364
373
  parse_queues(opts, opts.delete(:queues) || [])
365
374
 
366
375
  opts
@@ -372,9 +381,16 @@ module Sidekiq
372
381
 
373
382
  def parse_queue(opts, queue, weight = nil)
374
383
  opts[:queues] ||= []
384
+ opts[:strict] = true if opts[:strict].nil?
375
385
  raise ArgumentError, "queues: #{queue} cannot be defined twice" if opts[:queues].include?(queue)
376
386
  [weight.to_i, 1].max.times { opts[:queues] << queue }
377
387
  opts[:strict] = false if weight.to_i > 0
378
388
  end
389
+
390
+ def rails_app?
391
+ defined?(::Rails) && ::Rails.respond_to?(:application)
392
+ end
379
393
  end
380
394
  end
395
+
396
+ require "sidekiq/systemd"
@@ -90,13 +90,19 @@ module Sidekiq
90
90
  # Returns an array of the of pushed jobs' jids. The number of jobs pushed can be less
91
91
  # than the number given if the middleware stopped processing for one or more jobs.
92
92
  def push_bulk(items)
93
- arg = items["args"].first
94
- return [] unless arg # no jobs to push
95
- raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless arg.is_a?(Array)
93
+ args = items["args"]
94
+ raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless args.is_a?(Array) && args.all?(Array)
95
+ return [] if args.empty? # no jobs to push
96
+
97
+ at = items.delete("at")
98
+ raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all?(Numeric))
99
+ raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
96
100
 
97
101
  normed = normalize_item(items)
98
- payloads = items["args"].map { |args|
99
- copy = normed.merge("args" => args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
102
+ payloads = args.map.with_index { |job_args, index|
103
+ copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
104
+ copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
105
+
100
106
  result = process_single(items["class"], copy)
101
107
  result || nil
102
108
  }.compact
@@ -188,7 +194,7 @@ module Sidekiq
188
194
  end
189
195
 
190
196
  def atomic_push(conn, payloads)
191
- if payloads.first["at"]
197
+ if payloads.first.key?("at")
192
198
  conn.zadd("schedule", payloads.map { |hash|
193
199
  at = hash.delete("at").to_s
194
200
  [at, Sidekiq.dump_json(hash)]
@@ -213,20 +219,35 @@ module Sidekiq
213
219
  end
214
220
  end
215
221
 
222
+ def validate(item)
223
+ raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
224
+ raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
225
+ raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
226
+ raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
227
+ raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
228
+ end
229
+
216
230
  def normalize_item(item)
217
- raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: { 'class' => SomeWorker, 'args' => ['bob', 1, :foo => 'bar'] }") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
218
- raise(ArgumentError, "Job args must be an Array") unless item["args"].is_a?(Array)
219
- raise(ArgumentError, "Job class must be either a Class or String representation of the class name") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
220
- raise(ArgumentError, "Job 'at' must be a Numeric timestamp") if item.key?("at") && !item["at"].is_a?(Numeric)
231
+ # 6.0.0 push_bulk bug, #4321
232
+ # TODO Remove after a while...
233
+ item.delete("at") if item.key?("at") && item["at"].nil?
234
+
235
+ validate(item)
221
236
  # raise(ArgumentError, "Arguments must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices") unless JSON.load(JSON.dump(item['args'])) == item['args']
222
237
 
223
- normalized_hash(item["class"])
224
- .each { |key, value| item[key] = value if item[key].nil? }
238
+ # merge in the default sidekiq_options for the item's class and/or wrapped element
239
+ # this allows ActiveJobs to control sidekiq_options too.
240
+ defaults = normalized_hash(item["class"])
241
+ defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?("get_sidekiq_options")
242
+ item = defaults.merge(item)
243
+
244
+ raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
225
245
 
226
246
  item["class"] = item["class"].to_s
227
247
  item["queue"] = item["queue"].to_s
228
248
  item["jid"] ||= SecureRandom.hex(12)
229
249
  item["created_at"] ||= Time.now.to_f
250
+
230
251
  item
231
252
  end
232
253
 
@@ -5,9 +5,10 @@ require "sidekiq/extensions/generic_proxy"
5
5
  module Sidekiq
6
6
  module Extensions
7
7
  ##
8
- # Adds 'delay', 'delay_for' and `delay_until` methods to ActionMailer to offload arbitrary email
9
- # delivery to Sidekiq. Example:
8
+ # Adds +delay+, +delay_for+ and +delay_until+ methods to ActionMailer to offload arbitrary email
9
+ # delivery to Sidekiq.
10
10
  #
11
+ # @example
11
12
  # UserMailer.delay.send_welcome_email(new_user)
12
13
  # UserMailer.delay_for(5.days).send_welcome_email(new_user)
13
14
  # UserMailer.delay_until(5.days.from_now).send_welcome_email(new_user)
@@ -5,10 +5,11 @@ require "sidekiq/extensions/generic_proxy"
5
5
  module Sidekiq
6
6
  module Extensions
7
7
  ##
8
- # Adds 'delay', 'delay_for' and `delay_until` methods to ActiveRecord to offload instance method
9
- # execution to Sidekiq. Examples:
8
+ # Adds +delay+, +delay_for+ and +delay_until+ methods to ActiveRecord to offload instance method
9
+ # execution to Sidekiq.
10
10
  #
11
- # User.recent_signups.each { |user| user.delay.mark_as_awesome }
11
+ # @example
12
+ # User.recent_signups.each { |user| user.delay.mark_as_awesome }
12
13
  #
13
14
  # Please note, this is not recommended as this will serialize the entire
14
15
  # object to Redis. Your Sidekiq jobs should pass IDs, not entire instances.
@@ -5,11 +5,12 @@ require "sidekiq/extensions/generic_proxy"
5
5
  module Sidekiq
6
6
  module Extensions
7
7
  ##
8
- # Adds 'delay', 'delay_for' and `delay_until` methods to all Classes to offload class method
9
- # execution to Sidekiq. Examples:
8
+ # Adds `delay`, `delay_for` and `delay_until` methods to all Classes to offload class method
9
+ # execution to Sidekiq.
10
10
  #
11
- # User.delay.delete_inactive
12
- # Wikipedia.delay.download_changes_for(Date.today)
11
+ # @example
12
+ # User.delay.delete_inactive
13
+ # Wikipedia.delay.download_changes_for(Date.today)
13
14
  #
14
15
  class DelayedClass
15
16
  include Sidekiq::Worker
data/lib/sidekiq/fetch.rb CHANGED
@@ -14,21 +14,23 @@ module Sidekiq
14
14
  end
15
15
 
16
16
  def queue_name
17
- queue.sub(/.*queue:/, "")
17
+ queue.delete_prefix("queue:")
18
18
  end
19
19
 
20
20
  def requeue
21
21
  Sidekiq.redis do |conn|
22
- conn.rpush("queue:#{queue_name}", job)
22
+ conn.rpush(queue, job)
23
23
  end
24
24
  end
25
25
  }
26
26
 
27
27
  def initialize(options)
28
- @strictly_ordered_queues = !!options[:strict]
29
- @queues = options[:queues].map { |q| "queue:#{q}" }
28
+ raise ArgumentError, "missing queue list" unless options[:queues]
29
+ @options = options
30
+ @strictly_ordered_queues = !!@options[:strict]
31
+ @queues = @options[:queues].map { |q| "queue:#{q}" }
30
32
  if @strictly_ordered_queues
31
- @queues = @queues.uniq
33
+ @queues.uniq!
32
34
  @queues << TIMEOUT
33
35
  end
34
36
  end
@@ -38,37 +40,20 @@ module Sidekiq
38
40
  UnitOfWork.new(*work) if work
39
41
  end
40
42
 
41
- # Creating the Redis#brpop command takes into account any
42
- # configured queue weights. By default Redis#brpop returns
43
- # data from the first queue that has pending elements. We
44
- # recreate the queue command each time we invoke Redis#brpop
45
- # to honor weights and avoid queue starvation.
46
- def queues_cmd
47
- if @strictly_ordered_queues
48
- @queues
49
- else
50
- queues = @queues.shuffle.uniq
51
- queues << TIMEOUT
52
- queues
53
- end
54
- end
55
-
56
- # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
57
- # an instance method will make it async to the Fetcher actor
58
- def self.bulk_requeue(inprogress, options)
43
+ def bulk_requeue(inprogress, options)
59
44
  return if inprogress.empty?
60
45
 
61
46
  Sidekiq.logger.debug { "Re-queueing terminated jobs" }
62
47
  jobs_to_requeue = {}
63
48
  inprogress.each do |unit_of_work|
64
- jobs_to_requeue[unit_of_work.queue_name] ||= []
65
- jobs_to_requeue[unit_of_work.queue_name] << unit_of_work.job
49
+ jobs_to_requeue[unit_of_work.queue] ||= []
50
+ jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
66
51
  end
67
52
 
68
53
  Sidekiq.redis do |conn|
69
54
  conn.pipelined do
70
55
  jobs_to_requeue.each do |queue, jobs|
71
- conn.rpush("queue:#{queue}", jobs)
56
+ conn.rpush(queue, jobs)
72
57
  end
73
58
  end
74
59
  end
@@ -76,5 +61,20 @@ module Sidekiq
76
61
  rescue => ex
77
62
  Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
78
63
  end
64
+
65
+ # Creating the Redis#brpop command takes into account any
66
+ # configured queue weights. By default Redis#brpop returns
67
+ # data from the first queue that has pending elements. We
68
+ # recreate the queue command each time we invoke Redis#brpop
69
+ # to honor weights and avoid queue starvation.
70
+ def queues_cmd
71
+ if @strictly_ordered_queues
72
+ @queues
73
+ else
74
+ queues = @queues.shuffle!.uniq
75
+ queues << TIMEOUT
76
+ queues
77
+ end
78
+ end
79
79
  end
80
80
  end
@@ -23,8 +23,15 @@ module Sidekiq
23
23
  raise
24
24
  end
25
25
 
26
- def with_job_hash_context(job_hash, &block)
27
- @logger.with_context(job_hash_context(job_hash), &block)
26
+ def prepare(job_hash, &block)
27
+ level = job_hash["log_level"]
28
+ if level
29
+ @logger.log_at(level) do
30
+ Sidekiq::Context.with(job_hash_context(job_hash), &block)
31
+ end
32
+ else
33
+ Sidekiq::Context.with(job_hash_context(job_hash), &block)
34
+ end
28
35
  end
29
36
 
30
37
  def job_hash_context(job_hash)
@@ -32,14 +39,15 @@ module Sidekiq
32
39
  # attribute to expose the underlying thing.
33
40
  h = {
34
41
  class: job_hash["wrapped"] || job_hash["class"],
35
- jid: job_hash["jid"],
42
+ jid: job_hash["jid"]
36
43
  }
37
44
  h[:bid] = job_hash["bid"] if job_hash["bid"]
45
+ h[:tags] = job_hash["tags"] if job_hash["tags"]
38
46
  h
39
47
  end
40
48
 
41
49
  def with_elapsed_time_context(start, &block)
42
- @logger.with_context(elapsed_time_context(start), &block)
50
+ Sidekiq::Context.with(elapsed_time_context(start), &block)
43
51
  end
44
52
 
45
53
  def elapsed_time_context(start)
@@ -3,6 +3,9 @@
3
3
  require "sidekiq/scheduled"
4
4
  require "sidekiq/api"
5
5
 
6
+ require "zlib"
7
+ require "base64"
8
+
6
9
  module Sidekiq
7
10
  ##
8
11
  # Automatically retry jobs that fail in Sidekiq.
@@ -71,7 +74,7 @@ module Sidekiq
71
74
  # The global retry handler requires only the barest of data.
72
75
  # We want to be able to retry as much as possible so we don't
73
76
  # require the worker to be instantiated.
74
- def global(msg, queue)
77
+ def global(jobstr, queue)
75
78
  yield
76
79
  rescue Handled => ex
77
80
  raise ex
@@ -82,6 +85,7 @@ module Sidekiq
82
85
  # ignore, will be pushed back onto queue during hard_shutdown
83
86
  raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
84
87
 
88
+ msg = Sidekiq.load_json(jobstr)
85
89
  if msg["retry"]
86
90
  attempt_retry(nil, msg, queue, e)
87
91
  else
@@ -103,7 +107,7 @@ module Sidekiq
103
107
  # exception so the global block does not reprocess the error. The
104
108
  # Skip exception is unwrapped within Sidekiq::Processor#process before
105
109
  # calling the handle_exception handlers.
106
- def local(worker, msg, queue)
110
+ def local(worker, jobstr, queue)
107
111
  yield
108
112
  rescue Handled => ex
109
113
  raise ex
@@ -114,6 +118,7 @@ module Sidekiq
114
118
  # ignore, will be pushed back onto queue during hard_shutdown
115
119
  raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
116
120
 
121
+ msg = Sidekiq.load_json(jobstr)
117
122
  if msg["retry"].nil?
118
123
  msg["retry"] = worker.class.get_sidekiq_options["retry"]
119
124
  end
@@ -151,12 +156,14 @@ module Sidekiq
151
156
  msg["retry_count"] = 0
152
157
  end
153
158
 
154
- if msg["backtrace"] == true
155
- msg["error_backtrace"] = exception.backtrace
156
- elsif !msg["backtrace"]
157
- # do nothing
158
- elsif msg["backtrace"].to_i != 0
159
- msg["error_backtrace"] = exception.backtrace[0...msg["backtrace"].to_i]
159
+ if msg["backtrace"]
160
+ lines = if msg["backtrace"] == true
161
+ exception.backtrace
162
+ else
163
+ exception.backtrace[0...msg["backtrace"].to_i]
164
+ end
165
+
166
+ msg["error_backtrace"] = compress_backtrace(lines)
160
167
  end
161
168
 
162
169
  if count < max_retry_attempts
@@ -182,13 +189,13 @@ module Sidekiq
182
189
  handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
183
190
  end
184
191
 
192
+ send_to_morgue(msg) unless msg["dead"] == false
193
+
185
194
  Sidekiq.death_handlers.each do |handler|
186
195
  handler.call(msg, exception)
187
196
  rescue => e
188
197
  handle_exception(e, {context: "Error calling death handler", job: msg})
189
198
  end
190
-
191
- send_to_morgue(msg) unless msg["dead"] == false
192
199
  end
193
200
 
194
201
  def send_to_morgue(msg)
@@ -245,5 +252,11 @@ module Sidekiq
245
252
  rescue
246
253
  +"!!! ERROR MESSAGE THREW AN ERROR !!!"
247
254
  end
255
+
256
+ def compress_backtrace(backtrace)
257
+ serialized = Sidekiq.dump_json(backtrace)
258
+ compressed = Zlib::Deflate.deflate(serialized)
259
+ Base64.encode64(compressed)
260
+ end
248
261
  end
249
262
  end
@@ -16,12 +16,13 @@ module Sidekiq
16
16
  proc { Sidekiq::VERSION },
17
17
  proc { |me, data| data["tag"] },
18
18
  proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
19
- proc { |me, data| "stopping" if me.stopping? },
19
+ proc { |me, data| "stopping" if me.stopping? }
20
20
  ]
21
21
 
22
22
  attr_accessor :manager, :poller, :fetcher
23
23
 
24
24
  def initialize(options)
25
+ options[:fetch] ||= BasicFetch.new(options)
25
26
  @manager = Sidekiq::Manager.new(options)
26
27
  @poller = Sidekiq::Scheduled::Poller.new
27
28
  @done = false
@@ -56,7 +57,7 @@ module Sidekiq
56
57
 
57
58
  # Requeue everything in case there was a worker who grabbed work while stopped
58
59
  # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
59
- strategy = (@options[:fetch] || Sidekiq::BasicFetch)
60
+ strategy = @options[:fetch]
60
61
  strategy.bulk_requeue([], @options)
61
62
 
62
63
  clear_heartbeat
@@ -83,7 +84,7 @@ module Sidekiq
83
84
  Sidekiq.redis do |conn|
84
85
  conn.pipelined do
85
86
  conn.srem("processes", identity)
86
- conn.del("#{identity}:workers")
87
+ conn.unlink("#{identity}:workers")
87
88
  end
88
89
  end
89
90
  rescue
@@ -96,6 +97,32 @@ module Sidekiq
96
97
 
97
98
  end
98
99
 
100
+ def self.flush_stats
101
+ fails = Processor::FAILURE.reset
102
+ procd = Processor::PROCESSED.reset
103
+ return if fails + procd == 0
104
+
105
+ nowdate = Time.now.utc.strftime("%Y-%m-%d")
106
+ begin
107
+ Sidekiq.redis do |conn|
108
+ conn.pipelined do
109
+ conn.incrby("stat:processed", procd)
110
+ conn.incrby("stat:processed:#{nowdate}", procd)
111
+ conn.expire("stat:processed:#{nowdate}", STATS_TTL)
112
+
113
+ conn.incrby("stat:failed", fails)
114
+ conn.incrby("stat:failed:#{nowdate}", fails)
115
+ conn.expire("stat:failed:#{nowdate}", STATS_TTL)
116
+ end
117
+ end
118
+ rescue => ex
119
+ # we're exiting the process, things might be shut down so don't
120
+ # try to handle the exception
121
+ Sidekiq.logger.warn("Unable to flush stats: #{ex}")
122
+ end
123
+ end
124
+ at_exit(&method(:flush_stats))
125
+
99
126
  def ❤
100
127
  key = identity
101
128
  fails = procd = 0
@@ -118,7 +145,7 @@ module Sidekiq
118
145
  conn.incrby("stat:failed:#{nowdate}", fails)
119
146
  conn.expire("stat:failed:#{nowdate}", STATS_TTL)
120
147
 
121
- conn.del(workers_key)
148
+ conn.unlink(workers_key)
122
149
  curstate.each_pair do |tid, hash|
123
150
  conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
124
151
  end
@@ -129,15 +156,13 @@ module Sidekiq
129
156
  fails = procd = 0
130
157
 
131
158
  _, exists, _, _, msg = Sidekiq.redis { |conn|
132
- res = conn.multi {
159
+ conn.multi {
133
160
  conn.sadd("processes", key)
134
- conn.exists(key)
161
+ conn.exists?(key)
135
162
  conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
136
163
  conn.expire(key, 60)
137
164
  conn.rpop("#{key}-signals")
138
165
  }
139
-
140
- res
141
166
  }
142
167
 
143
168
  # first heartbeat or recovering from an outage and need to reestablish our heartbeat
@@ -148,7 +173,7 @@ module Sidekiq
148
173
  ::Process.kill(msg, ::Process.pid)
149
174
  rescue => e
150
175
  # ignore all redis/network issues
151
- logger.error("heartbeat: #{e.message}")
176
+ logger.error("heartbeat: #{e}")
152
177
  # don't lose the counts if there was a network issue
153
178
  Processor::PROCESSED.incr(procd)
154
179
  Processor::FAILURE.incr(fails)
@@ -165,7 +190,7 @@ module Sidekiq
165
190
  "concurrency" => @options[:concurrency],
166
191
  "queues" => @options[:queues].uniq,
167
192
  "labels" => @options[:labels],
168
- "identity" => identity,
193
+ "identity" => identity
169
194
  }
170
195
  end
171
196
  end