sidekiq 5.0.0 → 6.0.0

Potentially problematic release: this version of sidekiq might be problematic.

Files changed (79)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +61 -0
  3. data/.github/issue_template.md +3 -1
  4. data/.gitignore +1 -1
  5. data/.standard.yml +20 -0
  6. data/6.0-Upgrade.md +70 -0
  7. data/COMM-LICENSE +12 -10
  8. data/Changes.md +169 -1
  9. data/Ent-2.0-Upgrade.md +37 -0
  10. data/Ent-Changes.md +76 -0
  11. data/Gemfile +16 -21
  12. data/Gemfile.lock +196 -0
  13. data/LICENSE +1 -1
  14. data/Pro-4.0-Upgrade.md +35 -0
  15. data/Pro-5.0-Upgrade.md +25 -0
  16. data/Pro-Changes.md +137 -1
  17. data/README.md +18 -30
  18. data/Rakefile +6 -8
  19. data/bin/sidekiqload +28 -24
  20. data/bin/sidekiqmon +9 -0
  21. data/lib/generators/sidekiq/templates/worker_spec.rb.erb +1 -1
  22. data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
  23. data/lib/generators/sidekiq/worker_generator.rb +12 -14
  24. data/lib/sidekiq.rb +69 -49
  25. data/lib/sidekiq/api.rb +216 -160
  26. data/lib/sidekiq/cli.rb +174 -207
  27. data/lib/sidekiq/client.rb +55 -51
  28. data/lib/sidekiq/delay.rb +24 -4
  29. data/lib/sidekiq/exception_handler.rb +12 -16
  30. data/lib/sidekiq/extensions/action_mailer.rb +10 -20
  31. data/lib/sidekiq/extensions/active_record.rb +9 -7
  32. data/lib/sidekiq/extensions/class_methods.rb +9 -7
  33. data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
  34. data/lib/sidekiq/fetch.rb +5 -6
  35. data/lib/sidekiq/job_logger.rb +42 -14
  36. data/lib/sidekiq/job_retry.rb +71 -57
  37. data/lib/sidekiq/launcher.rb +74 -60
  38. data/lib/sidekiq/logger.rb +69 -0
  39. data/lib/sidekiq/manager.rb +12 -15
  40. data/lib/sidekiq/middleware/chain.rb +3 -2
  41. data/lib/sidekiq/middleware/i18n.rb +5 -7
  42. data/lib/sidekiq/monitor.rb +148 -0
  43. data/lib/sidekiq/paginator.rb +11 -12
  44. data/lib/sidekiq/processor.rb +126 -82
  45. data/lib/sidekiq/rails.rb +24 -32
  46. data/lib/sidekiq/redis_connection.rb +46 -14
  47. data/lib/sidekiq/scheduled.rb +50 -25
  48. data/lib/sidekiq/testing.rb +35 -27
  49. data/lib/sidekiq/testing/inline.rb +2 -1
  50. data/lib/sidekiq/util.rb +20 -14
  51. data/lib/sidekiq/version.rb +2 -1
  52. data/lib/sidekiq/web.rb +45 -53
  53. data/lib/sidekiq/web/action.rb +14 -10
  54. data/lib/sidekiq/web/application.rb +83 -58
  55. data/lib/sidekiq/web/helpers.rb +105 -67
  56. data/lib/sidekiq/web/router.rb +18 -15
  57. data/lib/sidekiq/worker.rb +144 -41
  58. data/sidekiq.gemspec +16 -27
  59. data/web/assets/javascripts/application.js +0 -0
  60. data/web/assets/javascripts/dashboard.js +21 -23
  61. data/web/assets/stylesheets/application.css +35 -2
  62. data/web/assets/stylesheets/bootstrap.css +2 -2
  63. data/web/locales/ar.yml +1 -0
  64. data/web/locales/en.yml +2 -0
  65. data/web/locales/es.yml +4 -3
  66. data/web/locales/ja.yml +7 -4
  67. data/web/views/_footer.erb +4 -1
  68. data/web/views/_nav.erb +3 -17
  69. data/web/views/busy.erb +5 -1
  70. data/web/views/layout.erb +1 -1
  71. data/web/views/queue.erb +1 -0
  72. data/web/views/queues.erb +2 -0
  73. data/web/views/retries.erb +4 -0
  74. metadata +25 -171
  75. data/.travis.yml +0 -18
  76. data/bin/sidekiqctl +0 -99
  77. data/lib/sidekiq/core_ext.rb +0 -119
  78. data/lib/sidekiq/logging.rb +0 -106
  79. data/lib/sidekiq/middleware/server/active_record.rb +0 -22
data/lib/sidekiq/fetch.rb

@@ -1,5 +1,6 @@
 # frozen_string_literal: true
-require 'sidekiq'
+
+require "sidekiq"
 
 module Sidekiq
   class BasicFetch
@@ -7,13 +8,13 @@ module Sidekiq
     # can check if the process is shutting down.
     TIMEOUT = 2
 
-    UnitOfWork = Struct.new(:queue, :job) do
+    UnitOfWork = Struct.new(:queue, :job) {
       def acknowledge
         # nothing to do
       end
 
       def queue_name
-        queue.sub(/.*queue:/, ''.freeze)
+        queue.sub(/.*queue:/, "")
       end
 
       def requeue
@@ -21,7 +22,7 @@ module Sidekiq
           conn.rpush("queue:#{queue_name}", job)
         end
       end
-    end
+    }
 
     def initialize(options)
       @strictly_ordered_queues = !!options[:strict]
@@ -52,7 +53,6 @@ module Sidekiq
       end
     end
 
-
     # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
     # an instance method will make it async to the Fetcher actor
     def self.bulk_requeue(inprogress, options)
@@ -76,6 +76,5 @@ module Sidekiq
     rescue => ex
       Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
     end
-
   end
 end
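The UnitOfWork change above is purely stylistic: the block is passed to Struct.new with braces instead of do/end, and the frozen single-quoted literal becomes a plain double-quoted one (the file already opts into frozen_string_literal). A minimal standalone sketch, not Sidekiq's code, showing the brace form defines methods exactly as before:

```ruby
# Hypothetical standalone sketch: a block passed to Struct.new with braces
# defines instance methods just like the do/end form did.
UnitOfWork = Struct.new(:queue, :job) {
  def queue_name
    queue.sub(/.*queue:/, "") # strip the "queue:" key prefix, as in BasicFetch
  end
}

puts UnitOfWork.new("queue:default", "{}").queue_name # => "default"
```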
data/lib/sidekiq/job_logger.rb

@@ -1,27 +1,55 @@
+# frozen_string_literal: true
+
 module Sidekiq
   class JobLogger
+    def initialize(logger = Sidekiq.logger)
+      @logger = logger
+    end
 
     def call(item, queue)
-      begin
-        start = Time.now
-        logger.info("start".freeze)
-        yield
-        logger.info("done: #{elapsed(start)} sec")
-      rescue Exception
-        logger.info("fail: #{elapsed(start)} sec")
-        raise
+      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      @logger.info("start")
+
+      yield
+
+      with_elapsed_time_context(start) do
+        @logger.info("done")
+      end
+    rescue Exception
+      with_elapsed_time_context(start) do
+        @logger.info("fail")
       end
+
+      raise
     end
 
-    private
+    def with_job_hash_context(job_hash, &block)
+      @logger.with_context(job_hash_context(job_hash), &block)
+    end
 
-    def elapsed(start)
-      (Time.now - start).round(3)
+    def job_hash_context(job_hash)
+      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+      # attribute to expose the underlying thing.
+      h = {
+        class: job_hash["wrapped"] || job_hash["class"],
+        jid: job_hash["jid"],
+      }
+      h[:bid] = job_hash["bid"] if job_hash["bid"]
+      h
     end
 
-    def logger
-      Sidekiq.logger
+    def with_elapsed_time_context(start, &block)
+      @logger.with_context(elapsed_time_context(start), &block)
+    end
+
+    def elapsed_time_context(start)
+      {elapsed: elapsed(start).to_s}
+    end
+
+    private
+
+    def elapsed(start)
+      (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
     end
   end
 end
-
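The rewritten JobLogger times jobs with the monotonic clock instead of Time.now, so elapsed figures cannot be skewed by wall-clock jumps (NTP corrections, DST), and it logs "done"/"fail" with the elapsed value carried as logger context. A small sketch of just the timing pattern, outside Sidekiq (the sleep stands in for the job):

```ruby
# Sketch of the monotonic timing pattern used by the new JobLogger.
start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
sleep 0.25 # placeholder for yielding to the actual job
elapsed = (Process.clock_gettime(Process::CLOCK_MONOTONIC) - start).round(3)
puts "done in #{elapsed} sec"
```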
data/lib/sidekiq/job_retry.rb

@@ -1,5 +1,7 @@
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
+# frozen_string_literal: true
+
+require "sidekiq/scheduled"
+require "sidekiq/api"
 
 module Sidekiq
   ##
@@ -55,7 +57,8 @@ module Sidekiq
   #     end
   #
   class JobRetry
-    class Skip < ::RuntimeError; end
+    class Handled < ::RuntimeError; end
+    class Skip < Handled; end
 
     include Sidekiq::Util
 
@@ -70,7 +73,7 @@ module Sidekiq
     # require the worker to be instantiated.
     def global(msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -79,11 +82,18 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      raise e unless msg['retry']
-      attempt_retry(nil, msg, queue, e)
-      raise e
-    end
+      if msg["retry"]
+        attempt_retry(nil, msg, queue, e)
+      else
+        Sidekiq.death_handlers.each do |handler|
+          handler.call(msg, e)
+        rescue => handler_ex
+          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
+        end
+      end
 
+      raise Handled
+    end
 
     # The local retry support means that any errors that occur within
     # this block can be associated with the given worker instance.
@@ -95,7 +105,7 @@ module Sidekiq
     # calling the handle_exception handlers.
     def local(worker, msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -104,11 +114,11 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry'] == nil
-        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      if msg["retry"].nil?
+        msg["retry"] = worker.class.get_sidekiq_options["retry"]
       end
 
-      raise e unless msg['retry']
+      raise e unless msg["retry"]
       attempt_retry(worker, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
@@ -121,47 +131,42 @@ module Sidekiq
    # instantiate the worker instance. All access must be guarded and
    # best effort.
    def attempt_retry(worker, msg, queue, exception)
-      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
-      msg['queue'] = if msg['retry_queue']
-        msg['retry_queue']
-      else
-        queue
-      end
+      msg["queue"] = (msg["retry_queue"] || queue)
 
-      # App code can stuff all sorts of crazy binary data into the error message
-      # that won't convert to JSON.
-      m = exception.message.to_s[0, 10_000]
+      m = exception_message(exception)
       if m.respond_to?(:scrub!)
         m.force_encoding("utf-8")
         m.scrub!
       end
 
-      msg['error_message'] = m
-      msg['error_class'] = exception.class.name
-      count = if msg['retry_count']
-        msg['retried_at'] = Time.now.to_f
-        msg['retry_count'] += 1
+      msg["error_message"] = m
+      msg["error_class"] = exception.class.name
+      count = if msg["retry_count"]
+        msg["retried_at"] = Time.now.to_f
+        msg["retry_count"] += 1
       else
-        msg['failed_at'] = Time.now.to_f
-        msg['retry_count'] = 0
+        msg["failed_at"] = Time.now.to_f
+        msg["retry_count"] = 0
       end
 
-      if msg['backtrace'] == true
-        msg['error_backtrace'] = exception.backtrace
-      elsif !msg['backtrace']
+      if msg["backtrace"] == true
+        msg["error_backtrace"] = exception.backtrace
+      elsif !msg["backtrace"]
         # do nothing
-      elsif msg['backtrace'].to_i != 0
-        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      elsif msg["backtrace"].to_i != 0
+        msg["error_backtrace"] = exception.backtrace[0...msg["backtrace"].to_i]
       end
 
       if count < max_retry_attempts
         delay = delay_for(worker, count, exception)
-        logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # Logging here can break retries if the logging device raises ENOSPC #3979
+        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
         payload = Sidekiq.dump_json(msg)
         Sidekiq.redis do |conn|
-          conn.zadd('retry', retry_at.to_s, payload)
+          conn.zadd("retry", retry_at.to_s, payload)
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
@@ -170,28 +175,26 @@ module Sidekiq
     end
 
     def retries_exhausted(worker, msg, exception)
-      logger.debug { "Retries exhausted for job" }
       begin
-        block = worker && worker.sidekiq_retries_exhausted_block || Sidekiq.default_retries_exhausted
-        block.call(msg, exception) if block
+        block = worker&.sidekiq_retries_exhausted_block
+        block&.call(msg, exception)
+      rescue => e
+        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
+      end
+
+      Sidekiq.death_handlers.each do |handler|
+        handler.call(msg, exception)
       rescue => e
-        handle_exception(e, { context: "Error calling retries_exhausted for #{msg['class']}", job: msg })
+        handle_exception(e, {context: "Error calling death handler", job: msg})
      end
 
-      send_to_morgue(msg) unless msg['dead'] == false
+      send_to_morgue(msg) unless msg["dead"] == false
     end
 
     def send_to_morgue(msg)
-      Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
-      now = Time.now.to_f
-      Sidekiq.redis do |conn|
-        conn.multi do
-          conn.zadd('dead', now, payload)
-          conn.zremrangebyscore('dead', '-inf', now - DeadSet.timeout)
-          conn.zremrangebyrank('dead', 0, -DeadSet.max_jobs)
-        end
-      end
+      DeadSet.new.kill(payload, notify_failure: false)
     end
 
     def retry_attempts_from(msg_retry, default)
@@ -203,21 +206,23 @@
     end
 
     def delay_for(worker, count, exception)
-      worker && worker.sidekiq_retry_in_block? && retry_in(worker, count, exception) || seconds_to_delay(count)
+      if worker&.sidekiq_retry_in_block
+        custom_retry_in = retry_in(worker, count, exception).to_i
+        return custom_retry_in if custom_retry_in > 0
+      end
+      seconds_to_delay(count)
     end
 
     # delayed_job uses the same basic formula
     def seconds_to_delay(count)
-      (count ** 4) + 15 + (rand(30)*(count+1))
+      (count**4) + 15 + (rand(30) * (count + 1))
     end
 
     def retry_in(worker, count, exception)
-      begin
-        worker.sidekiq_retry_in_block.call(count, exception).to_i
-      rescue Exception => e
-        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-        nil
-      end
+      worker.sidekiq_retry_in_block.call(count, exception)
+    rescue Exception => e
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      nil
     end
 
     def exception_caused_by_shutdown?(e, checked_causes = [])
@@ -231,5 +236,14 @@ module Sidekiq
         exception_caused_by_shutdown?(e.cause, checked_causes)
     end
 
+    # Extract message from exception.
+    # Set a default if the message raises an error
+    def exception_message(exception)
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      exception.message.to_s[0, 10_000]
+    rescue
+      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
+    end
   end
 end
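The net effect of the retry changes: a job whose retries are disabled or exhausted is passed to every callable in Sidekiq.death_handlers along with the exception, and an error raised inside a handler is logged rather than propagated. A hedged sketch of registering such a handler in an initializer (the handler body is illustrative, not Sidekiq's code):

```ruby
# config/initializers/sidekiq.rb -- illustrative death handler
Sidekiq.configure_server do |config|
  # Called once per dead job with the job hash and the fatal exception.
  config.death_handlers << ->(job, ex) do
    puts "#{job["class"]} #{job["jid"]} died: #{ex.message}"
  end
end
```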
data/lib/sidekiq/launcher.rb

@@ -1,17 +1,24 @@
-# encoding: utf-8
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/fetch"
+require "sidekiq/scheduled"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
  class Launcher
    include Util
 
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
+
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "stopping" if me.stopping? },
+    ]
+
    attr_accessor :manager, :poller, :fetcher
 
    def initialize(options)
@@ -39,7 +46,7 @@ module Sidekiq
    # return until all work is complete and cleaned up.
    # It can take up to the timeout to complete.
    def stop
-      deadline = Time.now + @options[:timeout]
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
 
      @done = true
      @manager.quiet
@@ -61,10 +68,30 @@ module Sidekiq
 
    private unless $TESTING
 
+    def start_heartbeat
+      loop do
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem("processes", identity)
+          conn.del("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
    def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
 
      ❤
    end
@@ -72,71 +99,73 @@ module Sidekiq
    def ❤
      key = identity
      fails = procd = 0
+
      begin
-        Processor::FAILURE.update {|curr| fails = curr; 0 }
-        Processor::PROCESSED.update {|curr| procd = curr; 0 }
+        fails = Processor::FAILURE.reset
+        procd = Processor::PROCESSED.reset
+        curstate = Processor::WORKER_STATE.dup
+
+        workers_key = "#{key}:workers"
+        nowdate = Time.now.utc.strftime("%Y-%m-%d")
 
-        workers_key = "#{key}:workers".freeze
-        nowdate = Time.now.utc.strftime("%Y-%m-%d".freeze)
        Sidekiq.redis do |conn|
          conn.multi do
-            conn.incrby("stat:processed".freeze, procd)
+            conn.incrby("stat:processed", procd)
            conn.incrby("stat:processed:#{nowdate}", procd)
-            conn.incrby("stat:failed".freeze, fails)
+            conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+            conn.incrby("stat:failed", fails)
            conn.incrby("stat:failed:#{nowdate}", fails)
+            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+
            conn.del(workers_key)
-            Processor::WORKER_STATE.each_pair do |tid, hash|
+            curstate.each_pair do |tid, hash|
              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
            end
            conn.expire(workers_key, 60)
          end
        end
+
        fails = procd = 0
 
-        _, exists, _, _, msg = Sidekiq.redis do |conn|
-          conn.multi do
-            conn.sadd('processes', key)
+        _, exists, _, _, msg = Sidekiq.redis { |conn|
+          res = conn.multi {
+            conn.sadd("processes", key)
            conn.exists(key)
-            conn.hmset(key, 'info', to_json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f, 'quiet', @done)
+            conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
-          end
-        end
+          }
+
+          res
+        }
 
        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-        fire_event(:heartbeat) if !exists
+        fire_event(:heartbeat) unless exists
 
        return unless msg
 
-        ::Process.kill(msg, $$)
+        ::Process.kill(msg, ::Process.pid)
      rescue => e
        # ignore all redis/network issues
        logger.error("heartbeat: #{e.message}")
        # don't lose the counts if there was a network issue
-        Processor::PROCESSED.increment(procd)
-        Processor::FAILURE.increment(fails)
-      end
-    end
-
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
+        Processor::PROCESSED.incr(procd)
+        Processor::FAILURE.incr(fails)
      end
-      Sidekiq.logger.info("Heartbeat stopping...")
    end
 
    def to_data
      @data ||= begin
        {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
+          "hostname" => hostname,
+          "started_at" => Time.now.to_f,
+          "pid" => ::Process.pid,
+          "tag" => @options[:tag] || "",
+          "concurrency" => @options[:concurrency],
+          "queues" => @options[:queues].uniq,
+          "labels" => @options[:labels],
+          "identity" => identity,
        }
      end
    end
@@ -148,20 +177,5 @@ module Sidekiq
        Sidekiq.dump_json(to_data)
      end
    end
-
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
-    end
-
  end
 end
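Launcher#stop now computes its shutdown deadline from the monotonic clock as well, so changing the system time mid-shutdown can no longer stretch or cut the timeout. Reduced to a standalone sketch (the timeout value and wait loop are placeholders, not Sidekiq's code):

```ruby
# Standalone sketch of the monotonic deadline pattern used in Launcher#stop.
timeout = 3 # seconds; stands in for @options[:timeout]
deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + timeout

until Process.clock_gettime(Process::CLOCK_MONOTONIC) > deadline
  sleep 0.1 # stand-in for waiting on in-flight jobs
end
puts "deadline reached, forcing shutdown"
```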