sidekiq 6.0.5 → 6.1.0

Potentially problematic release.

Files changed (44)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +13 -3
  3. data/Changes.md +39 -0
  4. data/Ent-Changes.md +14 -1
  5. data/Gemfile +1 -1
  6. data/Gemfile.lock +92 -92
  7. data/Pro-Changes.md +15 -1
  8. data/README.md +2 -5
  9. data/bin/sidekiq +26 -2
  10. data/lib/sidekiq.rb +9 -7
  11. data/lib/sidekiq/api.rb +2 -2
  12. data/lib/sidekiq/cli.rb +16 -6
  13. data/lib/sidekiq/client.rb +17 -10
  14. data/lib/sidekiq/extensions/active_record.rb +3 -2
  15. data/lib/sidekiq/extensions/class_methods.rb +5 -4
  16. data/lib/sidekiq/fetch.rb +20 -18
  17. data/lib/sidekiq/job_logger.rb +1 -1
  18. data/lib/sidekiq/launcher.rb +32 -5
  19. data/lib/sidekiq/logger.rb +7 -7
  20. data/lib/sidekiq/manager.rb +3 -3
  21. data/lib/sidekiq/monitor.rb +2 -2
  22. data/lib/sidekiq/processor.rb +4 -4
  23. data/lib/sidekiq/rails.rb +16 -18
  24. data/lib/sidekiq/redis_connection.rb +18 -13
  25. data/lib/sidekiq/sd_notify.rb +149 -0
  26. data/lib/sidekiq/systemd.rb +24 -0
  27. data/lib/sidekiq/testing.rb +1 -1
  28. data/lib/sidekiq/version.rb +1 -1
  29. data/lib/sidekiq/web.rb +16 -8
  30. data/lib/sidekiq/web/application.rb +3 -3
  31. data/lib/sidekiq/web/csrf_protection.rb +153 -0
  32. data/lib/sidekiq/web/helpers.rb +3 -6
  33. data/lib/sidekiq/web/router.rb +2 -4
  34. data/lib/sidekiq/worker.rb +4 -7
  35. data/sidekiq.gemspec +1 -2
  36. data/web/assets/javascripts/application.js +24 -21
  37. data/web/assets/stylesheets/application-dark.css +132 -124
  38. data/web/assets/stylesheets/application.css +5 -0
  39. data/web/locales/fr.yml +2 -2
  40. data/web/locales/lt.yml +83 -0
  41. data/web/locales/pl.yml +4 -4
  42. data/web/locales/vi.yml +83 -0
  43. data/web/views/layout.erb +1 -1
  44. metadata +10 -19
data/README.md CHANGED
@@ -2,11 +2,8 @@ Sidekiq
  ==============
 
  [![Gem Version](https://badge.fury.io/rb/sidekiq.svg)](https://rubygems.org/gems/sidekiq)
- [![Code Climate](https://codeclimate.com/github/mperham/sidekiq.svg)](https://codeclimate.com/github/mperham/sidekiq)
- [![Test Coverage](https://codeclimate.com/github/mperham/sidekiq/badges/coverage.svg)](https://codeclimate.com/github/mperham/sidekiq/coverage)
+ [![Codecov](https://codecov.io/gh/mperham/sidekiq/branch/master/graph/badge.svg)](https://codecov.io/gh/mperham/sidekiq)
  [![Build Status](https://circleci.com/gh/mperham/sidekiq/tree/master.svg?style=svg)](https://circleci.com/gh/mperham/sidekiq/tree/master)
- [![Gitter Chat](https://badges.gitter.im/mperham/sidekiq.svg)](https://gitter.im/mperham/sidekiq)
-
 
  Simple, efficient background processing for Ruby.
 
@@ -94,4 +91,4 @@ Please see [LICENSE](https://github.com/mperham/sidekiq/blob/master/LICENSE) for
  Author
  -----------------
 
- Mike Perham, [@mperham@mastodon.xyz](https://mastodon.xyz/@mperham) / [@sidekiq](https://twitter.com/sidekiq), [https://www.mikeperham.com](https://www.mikeperham.com) / [https://www.contribsys.com](https://www.contribsys.com)
+ Mike Perham, [@getajobmike](https://twitter.com/getajobmike) / [@sidekiq](https://twitter.com/sidekiq), [https://www.mikeperham.com](https://www.mikeperham.com) / [https://www.contribsys.com](https://www.contribsys.com)
data/bin/sidekiq CHANGED
@@ -6,13 +6,37 @@ $TESTING = false
 
  require_relative '../lib/sidekiq/cli'
 
+ def integrate_with_systemd
+   return unless ENV["NOTIFY_SOCKET"]
+
+   Sidekiq.configure_server do |config|
+     Sidekiq.logger.info "Enabling systemd notification integration"
+     require "sidekiq/sd_notify"
+     config.on(:startup) do
+       Sidekiq::SdNotify.ready
+     end
+     config.on(:shutdown) do
+       Sidekiq::SdNotify.stopping
+     end
+     Sidekiq.start_watchdog if Sidekiq::SdNotify.watchdog?
+   end
+ end
+
  begin
    cli = Sidekiq::CLI.instance
    cli.parse
+
+   integrate_with_systemd
+
    cli.run
  rescue => e
    raise e if $DEBUG
-   STDERR.puts e.message
-   STDERR.puts e.backtrace.join("\n")
+   if Sidekiq.error_handlers.length == 0
+     STDERR.puts e.message
+     STDERR.puts e.backtrace.join("\n")
+   else
+     cli.handle_exception e
+   end
+
    exit 1
  end
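For context, the new Sidekiq::SdNotify helper speaks the plain sd_notify(3) datagram protocol. A minimal sketch of that protocol, assuming only the documented systemd contract (the helper name and code below are illustrative, not copied from the vendored file):

  require "socket"

  # Send a state string ("READY=1", "STOPPING=1", "WATCHDOG=1", ...) to the
  # UNIX datagram socket systemd advertises via NOTIFY_SOCKET.
  def sd_notify(state)
    path = ENV["NOTIFY_SOCKET"]
    return unless path

    sock = Socket.new(Socket::AF_UNIX, Socket::SOCK_DGRAM)
    begin
      sock.connect(Socket.pack_sockaddr_un(path))
      sock.write(state)
    ensure
      sock.close
    end
  end

  sd_notify("READY=1") if ENV["NOTIFY_SOCKET"]

In a unit file this pairs with `Type=notify`, and `WatchdogSec=` enables the watchdog branch above.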
data/lib/sidekiq.rb CHANGED
@@ -20,6 +20,7 @@ module Sidekiq
    labels: [],
    concurrency: 10,
    require: ".",
+   strict: true,
    environment: nil,
    timeout: 25,
    poll_interval_average: nil,
@@ -30,16 +31,16 @@ module Sidekiq
      startup: [],
      quiet: [],
      shutdown: [],
-     heartbeat: [],
+     heartbeat: []
    },
    dead_max_jobs: 10_000,
    dead_timeout_in_seconds: 180 * 24 * 60 * 60, # 6 months
-   reloader: proc { |&block| block.call },
+   reloader: proc { |&block| block.call }
  }
 
  DEFAULT_WORKER_OPTIONS = {
    "retry" => true,
-   "queue" => "default",
+   "queue" => "default"
  }
 
  FAKE_INFO = {
@@ -47,7 +48,7 @@ module Sidekiq
    "uptime_in_days" => "9999",
    "connected_clients" => "9999",
    "used_memory_human" => "9P",
-   "used_memory_peak_human" => "9P",
+   "used_memory_peak_human" => "9P"
  }
 
  def self.❨╯°□°❩╯︵┻━┻
@@ -95,10 +96,11 @@ module Sidekiq
    retryable = true
    begin
      yield conn
-   rescue Redis::CommandError => ex
+   rescue Redis::BaseError => ex
      # 2550 Failover can cause the server to become a replica, need
      # to disconnect and reopen the socket to get back to the primary.
-     if retryable && ex.message =~ /READONLY/
+     # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+     if retryable && ex.message =~ /READONLY|NOREPLICAS/
        conn.disconnect!
        retryable = false
        retry
@@ -154,7 +156,7 @@ module Sidekiq
 
  def self.default_worker_options=(hash)
    # stringify
-   @default_worker_options = default_worker_options.merge(Hash[hash.map { |k, v| [k.to_s, v] }])
+   @default_worker_options = default_worker_options.merge(hash.transform_keys(&:to_s))
  end
 
  def self.default_worker_options
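As a quick usage note (the option values below are arbitrary), the setter keeps its existing behavior of stringifying only the keys, now via transform_keys:

  Sidekiq.default_worker_options = { retry: 3, "backtrace" => true }
  Sidekiq.default_worker_options
  # => the defaults merged with "retry" => 3 and "backtrace" => true (keys stringified, values untouched)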
data/lib/sidekiq/api.rb CHANGED
@@ -105,7 +105,7 @@ module Sidekiq
 
      default_queue_latency: default_queue_latency,
      workers_size: workers_size,
-     enqueued: enqueued,
+     enqueued: enqueued
    }
  end
 
@@ -921,7 +921,7 @@ module Sidekiq
    procs = conn.sscan_each("processes").to_a
    procs.sort.each do |key|
      valid, workers = conn.pipelined {
-       conn.exists(key)
+       conn.exists?(key)
        conn.hgetall("#{key}:workers")
      }
      next unless valid
data/lib/sidekiq/cli.rb CHANGED
@@ -54,7 +54,7 @@ module Sidekiq
 
    logger.info "Running in #{RUBY_DESCRIPTION}"
    logger.info Sidekiq::LICENSE
-   logger.info "Upgrade to Sidekiq Pro for more features and support: http://sidekiq.org" unless defined?(::Sidekiq::Pro)
+   logger.info "Upgrade to Sidekiq Pro for more features and support: https://sidekiq.org" unless defined?(::Sidekiq::Pro)
 
    # touch the connection pool so it is created before we
    # fire startup and start multithreading.
@@ -163,7 +163,7 @@ module Sidekiq
        Sidekiq.logger.warn "<no backtrace available>"
      end
    end
-   },
+   }
  }
  UNHANDLED_SIGNAL_HANDLER = ->(cli) { Sidekiq.logger.info "No signal handler registered, ignoring" }
  SIGNAL_HANDLERS.default = UNHANDLED_SIGNAL_HANDLER
@@ -185,8 +185,8 @@ module Sidekiq
    # See #984 for discussion.
    # APP_ENV is now the preferred ENV term since it is not tech-specific.
    # Both Sinatra 2.0+ and Sidekiq support this term.
-   # RACK_ENV and RAILS_ENV are there for legacy support.
-   @environment = cli_env || ENV["APP_ENV"] || ENV["RACK_ENV"] || ENV["RAILS_ENV"] || "development"
+   # RAILS_ENV and RACK_ENV are there for legacy support.
+   @environment = cli_env || ENV["APP_ENV"] || ENV["RAILS_ENV"] || ENV["RACK_ENV"] || "development"
  end
 
  def symbolize_keys_deep!(hash)
@@ -228,8 +228,7 @@ module Sidekiq
    opts = parse_config(opts[:config_file]).merge(opts) if opts[:config_file]
 
    # set defaults
-   opts[:queues] = ["default"] if opts[:queues].nil? || opts[:queues].empty?
-   opts[:strict] = true if opts[:strict].nil?
+   opts[:queues] = ["default"] if opts[:queues].nil?
    opts[:concurrency] = Integer(ENV["RAILS_MAX_THREADS"]) if opts[:concurrency].nil? && ENV["RAILS_MAX_THREADS"]
 
    # merge with defaults
@@ -358,6 +357,12 @@ module Sidekiq
    Sidekiq.logger.level = ::Logger::DEBUG if options[:verbose]
  end
 
+ INTERNAL_OPTIONS = [
+   # These are options that are set internally and cannot be
+   # set via the config file or command line arguments.
+   :strict
+ ]
+
  def parse_config(path)
    opts = YAML.load(ERB.new(File.read(path)).result) || {}
 
@@ -368,6 +373,8 @@ module Sidekiq
    end
 
    opts = opts.merge(opts.delete(environment.to_sym) || {})
+   opts.delete(*INTERNAL_OPTIONS)
+
    parse_queues(opts, opts.delete(:queues) || [])
 
    opts
@@ -379,6 +386,7 @@ module Sidekiq
 
  def parse_queue(opts, queue, weight = nil)
    opts[:queues] ||= []
+   opts[:strict] = true if opts[:strict].nil?
    raise ArgumentError, "queues: #{queue} cannot be defined twice" if opts[:queues].include?(queue)
    [weight.to_i, 1].max.times { opts[:queues] << queue }
    opts[:strict] = false if weight.to_i > 0
@@ -389,3 +397,5 @@ module Sidekiq
      end
    end
  end
+
+ require "sidekiq/systemd"
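A brief, self-contained illustration of the queue-weight handling above (queue names and weights are arbitrary): each weighted queue is repeated `weight` times, and strict ordering is switched off as soon as any explicit weight appears.

  # Rough sketch of what parse_queue leaves behind for "-q critical,3 -q default"
  opts = {}
  [["critical", 3], ["default", nil]].each do |queue, weight|
    opts[:queues] ||= []
    opts[:strict] = true if opts[:strict].nil?
    [weight.to_i, 1].max.times { opts[:queues] << queue }
    opts[:strict] = false if weight.to_i > 0
  end
  opts # => {:queues=>["critical", "critical", "critical", "default"], :strict=>false}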
data/lib/sidekiq/client.rb CHANGED
@@ -90,16 +90,17 @@ module Sidekiq
    # Returns an array of the of pushed jobs' jids. The number of jobs pushed can be less
    # than the number given if the middleware stopped processing for one or more jobs.
    def push_bulk(items)
-     arg = items["args"].first
-     return [] unless arg # no jobs to push
-     raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless arg.is_a?(Array)
+     args = items["args"]
+     raise ArgumentError, "Bulk arguments must be an Array of Arrays: [[1], [2]]" unless args.is_a?(Array) && args.all?(Array)
+     return [] if args.empty? # no jobs to push
 
      at = items.delete("at")
      raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all?(Numeric))
+     raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
 
      normed = normalize_item(items)
-     payloads = items["args"].map.with_index { |args, index|
-       copy = normed.merge("args" => args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
+     payloads = args.map.with_index { |job_args, index|
+       copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
        copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
 
        result = process_single(items["class"], copy)
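For reference, a minimal push_bulk call that satisfies the stricter checks above ("SomeWorker" is a placeholder worker class name):

  # "args" must be an Array of Arrays; when "at" is an Array it must match "args" in size.
  Sidekiq::Client.push_bulk(
    "class" => "SomeWorker",
    "args"  => [[1], [2], [3]],
    "at"    => [Time.now.to_f + 10, Time.now.to_f + 20, Time.now.to_f + 30]
  )
  # => an array of three jids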
@@ -218,16 +219,20 @@ module Sidekiq
      end
    end
 
+   def validate(item)
+     raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
+     raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+     raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
+     raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
+     raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+   end
+
    def normalize_item(item)
      # 6.0.0 push_bulk bug, #4321
      # TODO Remove after a while...
      item.delete("at") if item.key?("at") && item["at"].nil?
 
-     raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: { 'class' => SomeWorker, 'args' => ['bob', 1, :foo => 'bar'] }") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
-     raise(ArgumentError, "Job args must be an Array") unless item["args"].is_a?(Array)
-     raise(ArgumentError, "Job class must be either a Class or String representation of the class name") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
-     raise(ArgumentError, "Job 'at' must be a Numeric timestamp") if item.key?("at") && !item["at"].is_a?(Numeric)
-     raise(ArgumentError, "Job tags must be an Array") if item["tags"] && !item["tags"].is_a?(Array)
+     validate(item)
      # raise(ArgumentError, "Arguments must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices") unless JSON.load(JSON.dump(item['args'])) == item['args']
 
      # merge in the default sidekiq_options for the item's class and/or wrapped element
@@ -236,6 +241,8 @@ module Sidekiq
      defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?("get_sidekiq_options")
      item = defaults.merge(item)
 
+     raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
+
      item["class"] = item["class"].to_s
      item["queue"] = item["queue"].to_s
      item["jid"] ||= SecureRandom.hex(12)
data/lib/sidekiq/extensions/active_record.rb CHANGED
@@ -6,9 +6,10 @@ module Sidekiq
  module Extensions
    ##
    # Adds 'delay', 'delay_for' and `delay_until` methods to ActiveRecord to offload instance method
-   # execution to Sidekiq. Examples:
+   # execution to Sidekiq.
    #
-   #   User.recent_signups.each { |user| user.delay.mark_as_awesome }
+   # @example
+   #   User.recent_signups.each { |user| user.delay.mark_as_awesome }
    #
    # Please note, this is not recommended as this will serialize the entire
    # object to Redis. Your Sidekiq jobs should pass IDs, not entire instances.
data/lib/sidekiq/extensions/class_methods.rb CHANGED
@@ -5,11 +5,12 @@ require "sidekiq/extensions/generic_proxy"
  module Sidekiq
    module Extensions
      ##
-     # Adds 'delay', 'delay_for' and `delay_until` methods to all Classes to offload class method
-     # execution to Sidekiq. Examples:
+     # Adds `delay`, `delay_for` and `delay_until` methods to all Classes to offload class method
+     # execution to Sidekiq.
      #
-     #   User.delay.delete_inactive
-     #   Wikipedia.delay.download_changes_for(Date.today)
+     # @example
+     #   User.delay.delete_inactive
+     #   Wikipedia.delay.download_changes_for(Date.today)
      #
      class DelayedClass
        include Sidekiq::Worker
data/lib/sidekiq/fetch.rb CHANGED
@@ -25,8 +25,10 @@ module Sidekiq
    }
 
    def initialize(options)
-     @strictly_ordered_queues = !!options[:strict]
-     @queues = options[:queues].map { |q| "queue:#{q}" }
+     raise ArgumentError, "missing queue list" unless options[:queues]
+     @options = options
+     @strictly_ordered_queues = !!@options[:strict]
+     @queues = @options[:queues].map { |q| "queue:#{q}" }
      if @strictly_ordered_queues
        @queues.uniq!
        @queues << TIMEOUT
@@ -38,24 +40,9 @@ module Sidekiq
      UnitOfWork.new(*work) if work
    end
 
-   # Creating the Redis#brpop command takes into account any
-   # configured queue weights. By default Redis#brpop returns
-   # data from the first queue that has pending elements. We
-   # recreate the queue command each time we invoke Redis#brpop
-   # to honor weights and avoid queue starvation.
-   def queues_cmd
-     if @strictly_ordered_queues
-       @queues
-     else
-       queues = @queues.shuffle!.uniq
-       queues << TIMEOUT
-       queues
-     end
-   end
-
    # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
    # an instance method will make it async to the Fetcher actor
-   def self.bulk_requeue(inprogress, options)
+   def bulk_requeue(inprogress, options)
      return if inprogress.empty?
 
      Sidekiq.logger.debug { "Re-queueing terminated jobs" }
@@ -76,5 +63,20 @@ module Sidekiq
    rescue => ex
      Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
    end
+
+   # Creating the Redis#brpop command takes into account any
+   # configured queue weights. By default Redis#brpop returns
+   # data from the first queue that has pending elements. We
+   # recreate the queue command each time we invoke Redis#brpop
+   # to honor weights and avoid queue starvation.
+   def queues_cmd
+     if @strictly_ordered_queues
+       @queues
+     else
+       queues = @queues.shuffle!.uniq
+       queues << TIMEOUT
+       queues
+     end
+   end
  end
end
data/lib/sidekiq/job_logger.rb CHANGED
@@ -39,7 +39,7 @@ module Sidekiq
    # attribute to expose the underlying thing.
    h = {
      class: job_hash["wrapped"] || job_hash["class"],
-     jid: job_hash["jid"],
+     jid: job_hash["jid"]
    }
    h[:bid] = job_hash["bid"] if job_hash["bid"]
    h[:tags] = job_hash["tags"] if job_hash["tags"]
data/lib/sidekiq/launcher.rb CHANGED
@@ -16,12 +16,13 @@ module Sidekiq
      proc { Sidekiq::VERSION },
      proc { |me, data| data["tag"] },
      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
-     proc { |me, data| "stopping" if me.stopping? },
+     proc { |me, data| "stopping" if me.stopping? }
    ]
 
    attr_accessor :manager, :poller, :fetcher
 
    def initialize(options)
+     options[:fetch] ||= BasicFetch.new(options)
      @manager = Sidekiq::Manager.new(options)
      @poller = Sidekiq::Scheduled::Poller.new
      @done = false
@@ -56,7 +57,7 @@ module Sidekiq
 
      # Requeue everything in case there was a worker who grabbed work while stopped
      # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-     strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+     strategy = @options[:fetch]
      strategy.bulk_requeue([], @options)
 
      clear_heartbeat
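Taken together with the fetch.rb change above, the fetch strategy is now a configured instance rather than a class. A hedged sketch of how a custom strategy would be wired in under this scheme (MyFetch is a hypothetical class responding to retrieve_work and bulk_requeue):

  Sidekiq.configure_server do |config|
    # config.options is the server options hash; BasicFetch is only installed
    # as a fallback, so assigning an instance here overrides it.
    config.options[:fetch] = MyFetch.new(config.options)
  end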
@@ -96,6 +97,32 @@ module Sidekiq
 
    end
 
+   def self.flush_stats
+     fails = Processor::FAILURE.reset
+     procd = Processor::PROCESSED.reset
+     return if fails + procd == 0
+
+     nowdate = Time.now.utc.strftime("%Y-%m-%d")
+     begin
+       Sidekiq.redis do |conn|
+         conn.pipelined do
+           conn.incrby("stat:processed", procd)
+           conn.incrby("stat:processed:#{nowdate}", procd)
+           conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+           conn.incrby("stat:failed", fails)
+           conn.incrby("stat:failed:#{nowdate}", fails)
+           conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+         end
+       end
+     rescue => ex
+       # we're exiting the process, things might be shut down so don't
+       # try to handle the exception
+       Sidekiq.logger.warn("Unable to flush stats: #{ex}")
+     end
+   end
+   at_exit(&method(:flush_stats))
+
    def ❤
      key = identity
      fails = procd = 0
@@ -131,7 +158,7 @@ module Sidekiq
      _, exists, _, _, msg = Sidekiq.redis { |conn|
        conn.multi {
          conn.sadd("processes", key)
-         conn.exists(key)
+         conn.exists?(key)
          conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
          conn.expire(key, 60)
          conn.rpop("#{key}-signals")
@@ -146,7 +173,7 @@ module Sidekiq
      ::Process.kill(msg, ::Process.pid)
    rescue => e
      # ignore all redis/network issues
-     logger.error("heartbeat: #{e.message}")
+     logger.error("heartbeat: #{e}")
      # don't lose the counts if there was a network issue
      Processor::PROCESSED.incr(procd)
      Processor::FAILURE.incr(fails)
@@ -163,7 +190,7 @@ module Sidekiq
      "concurrency" => @options[:concurrency],
      "queues" => @options[:queues].uniq,
      "labels" => @options[:labels],
-     "identity" => identity,
+     "identity" => identity
    }
  end
end