sidekiq 5.2.8

Potentially problematic release: this version of sidekiq might be problematic.

Files changed (119)
  1. checksums.yaml +7 -0
  2. data/.circleci/config.yml +61 -0
  3. data/.github/contributing.md +32 -0
  4. data/.github/issue_template.md +11 -0
  5. data/.gitignore +15 -0
  6. data/.travis.yml +11 -0
  7. data/3.0-Upgrade.md +70 -0
  8. data/4.0-Upgrade.md +53 -0
  9. data/5.0-Upgrade.md +56 -0
  10. data/COMM-LICENSE +97 -0
  11. data/Changes.md +1542 -0
  12. data/Ent-Changes.md +238 -0
  13. data/Gemfile +23 -0
  14. data/LICENSE +9 -0
  15. data/Pro-2.0-Upgrade.md +138 -0
  16. data/Pro-3.0-Upgrade.md +44 -0
  17. data/Pro-4.0-Upgrade.md +35 -0
  18. data/Pro-Changes.md +759 -0
  19. data/README.md +109 -0
  20. data/Rakefile +9 -0
  21. data/bin/sidekiq +18 -0
  22. data/bin/sidekiqctl +20 -0
  23. data/bin/sidekiqload +149 -0
  24. data/code_of_conduct.md +50 -0
  25. data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
  26. data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
  27. data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
  28. data/lib/generators/sidekiq/worker_generator.rb +49 -0
  29. data/lib/sidekiq.rb +237 -0
  30. data/lib/sidekiq/api.rb +940 -0
  31. data/lib/sidekiq/cli.rb +445 -0
  32. data/lib/sidekiq/client.rb +243 -0
  33. data/lib/sidekiq/core_ext.rb +1 -0
  34. data/lib/sidekiq/ctl.rb +221 -0
  35. data/lib/sidekiq/delay.rb +42 -0
  36. data/lib/sidekiq/exception_handler.rb +29 -0
  37. data/lib/sidekiq/extensions/action_mailer.rb +57 -0
  38. data/lib/sidekiq/extensions/active_record.rb +40 -0
  39. data/lib/sidekiq/extensions/class_methods.rb +40 -0
  40. data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
  41. data/lib/sidekiq/fetch.rb +81 -0
  42. data/lib/sidekiq/job_logger.rb +25 -0
  43. data/lib/sidekiq/job_retry.rb +262 -0
  44. data/lib/sidekiq/launcher.rb +173 -0
  45. data/lib/sidekiq/logging.rb +122 -0
  46. data/lib/sidekiq/manager.rb +137 -0
  47. data/lib/sidekiq/middleware/chain.rb +150 -0
  48. data/lib/sidekiq/middleware/i18n.rb +42 -0
  49. data/lib/sidekiq/middleware/server/active_record.rb +23 -0
  50. data/lib/sidekiq/paginator.rb +43 -0
  51. data/lib/sidekiq/processor.rb +279 -0
  52. data/lib/sidekiq/rails.rb +58 -0
  53. data/lib/sidekiq/redis_connection.rb +144 -0
  54. data/lib/sidekiq/scheduled.rb +174 -0
  55. data/lib/sidekiq/testing.rb +333 -0
  56. data/lib/sidekiq/testing/inline.rb +29 -0
  57. data/lib/sidekiq/util.rb +66 -0
  58. data/lib/sidekiq/version.rb +4 -0
  59. data/lib/sidekiq/web.rb +213 -0
  60. data/lib/sidekiq/web/action.rb +89 -0
  61. data/lib/sidekiq/web/application.rb +353 -0
  62. data/lib/sidekiq/web/helpers.rb +325 -0
  63. data/lib/sidekiq/web/router.rb +100 -0
  64. data/lib/sidekiq/worker.rb +220 -0
  65. data/sidekiq.gemspec +21 -0
  66. data/web/assets/images/favicon.ico +0 -0
  67. data/web/assets/images/logo.png +0 -0
  68. data/web/assets/images/status.png +0 -0
  69. data/web/assets/javascripts/application.js +92 -0
  70. data/web/assets/javascripts/dashboard.js +315 -0
  71. data/web/assets/stylesheets/application-rtl.css +246 -0
  72. data/web/assets/stylesheets/application.css +1144 -0
  73. data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
  74. data/web/assets/stylesheets/bootstrap.css +5 -0
  75. data/web/locales/ar.yml +81 -0
  76. data/web/locales/cs.yml +78 -0
  77. data/web/locales/da.yml +68 -0
  78. data/web/locales/de.yml +69 -0
  79. data/web/locales/el.yml +68 -0
  80. data/web/locales/en.yml +81 -0
  81. data/web/locales/es.yml +70 -0
  82. data/web/locales/fa.yml +80 -0
  83. data/web/locales/fr.yml +78 -0
  84. data/web/locales/he.yml +79 -0
  85. data/web/locales/hi.yml +75 -0
  86. data/web/locales/it.yml +69 -0
  87. data/web/locales/ja.yml +80 -0
  88. data/web/locales/ko.yml +68 -0
  89. data/web/locales/nb.yml +77 -0
  90. data/web/locales/nl.yml +68 -0
  91. data/web/locales/pl.yml +59 -0
  92. data/web/locales/pt-br.yml +68 -0
  93. data/web/locales/pt.yml +67 -0
  94. data/web/locales/ru.yml +78 -0
  95. data/web/locales/sv.yml +68 -0
  96. data/web/locales/ta.yml +75 -0
  97. data/web/locales/uk.yml +76 -0
  98. data/web/locales/ur.yml +80 -0
  99. data/web/locales/zh-cn.yml +68 -0
  100. data/web/locales/zh-tw.yml +68 -0
  101. data/web/views/_footer.erb +20 -0
  102. data/web/views/_job_info.erb +88 -0
  103. data/web/views/_nav.erb +52 -0
  104. data/web/views/_paging.erb +23 -0
  105. data/web/views/_poll_link.erb +7 -0
  106. data/web/views/_status.erb +4 -0
  107. data/web/views/_summary.erb +40 -0
  108. data/web/views/busy.erb +98 -0
  109. data/web/views/dashboard.erb +75 -0
  110. data/web/views/dead.erb +34 -0
  111. data/web/views/layout.erb +40 -0
  112. data/web/views/morgue.erb +75 -0
  113. data/web/views/queue.erb +46 -0
  114. data/web/views/queues.erb +30 -0
  115. data/web/views/retries.erb +80 -0
  116. data/web/views/retry.erb +34 -0
  117. data/web/views/scheduled.erb +54 -0
  118. data/web/views/scheduled_job_info.erb +8 -0
  119. metadata +230 -0

data/lib/sidekiq/fetch.rb
@@ -0,0 +1,81 @@
+ # frozen_string_literal: true
+ require 'sidekiq'
+
+ module Sidekiq
+   class BasicFetch
+     # We want the fetch operation to timeout every few seconds so the thread
+     # can check if the process is shutting down.
+     TIMEOUT = 2
+
+     UnitOfWork = Struct.new(:queue, :job) do
+       def acknowledge
+         # nothing to do
+       end
+
+       def queue_name
+         queue.sub(/.*queue:/, '')
+       end
+
+       def requeue
+         Sidekiq.redis do |conn|
+           conn.rpush("queue:#{queue_name}", job)
+         end
+       end
+     end
+
+     def initialize(options)
+       @strictly_ordered_queues = !!options[:strict]
+       @queues = options[:queues].map { |q| "queue:#{q}" }
+       if @strictly_ordered_queues
+         @queues = @queues.uniq
+         @queues << TIMEOUT
+       end
+     end
+
+     def retrieve_work
+       work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd) }
+       UnitOfWork.new(*work) if work
+     end
+
+     # Creating the Redis#brpop command takes into account any
+     # configured queue weights. By default Redis#brpop returns
+     # data from the first queue that has pending elements. We
+     # recreate the queue command each time we invoke Redis#brpop
+     # to honor weights and avoid queue starvation.
+     def queues_cmd
+       if @strictly_ordered_queues
+         @queues
+       else
+         queues = @queues.shuffle.uniq
+         queues << TIMEOUT
+         queues
+       end
+     end
+
+
+     # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
+     # an instance method will make it async to the Fetcher actor
+     def self.bulk_requeue(inprogress, options)
+       return if inprogress.empty?
+
+       Sidekiq.logger.debug { "Re-queueing terminated jobs" }
+       jobs_to_requeue = {}
+       inprogress.each do |unit_of_work|
+         jobs_to_requeue[unit_of_work.queue_name] ||= []
+         jobs_to_requeue[unit_of_work.queue_name] << unit_of_work.job
+       end
+
+       Sidekiq.redis do |conn|
+         conn.pipelined do
+           jobs_to_requeue.each do |queue, jobs|
+             conn.rpush("queue:#{queue}", jobs)
+           end
+         end
+       end
+       Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis")
+     rescue => ex
+       Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
+     end
+
+   end
+ end
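
The queues_cmd comment above is easiest to see with a concrete, hypothetical configuration. Listing a queue more than once (what `sidekiq -q critical,2 -q default` expands to) makes it more likely to be checked first, while shuffle-then-uniq keeps lower-weight queues from being starved. A minimal sketch, assuming only the options hash shape that initialize expects:

    require 'sidekiq'
    require 'sidekiq/fetch'

    # "critical" is listed twice, mimicking a weight of 2.
    fetch = Sidekiq::BasicFetch.new(strict: false, queues: %w[critical critical default])

    # Each call re-rolls the BRPOP argument order; TIMEOUT (2) is always appended last.
    3.times { p fetch.queues_cmd }
    # e.g. ["queue:critical", "queue:default", 2]
    #      ["queue:default", "queue:critical", 2]
    #      ["queue:critical", "queue:default", 2]
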

data/lib/sidekiq/job_logger.rb
@@ -0,0 +1,25 @@
+ # frozen_string_literal: true
+ module Sidekiq
+   class JobLogger
+
+     def call(item, queue)
+       start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+       logger.info("start")
+       yield
+       logger.info("done: #{elapsed(start)} sec")
+     rescue Exception
+       logger.info("fail: #{elapsed(start)} sec")
+       raise
+     end
+
+     private
+
+     def elapsed(start)
+       (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
+     end
+
+     def logger
+       Sidekiq.logger
+     end
+   end
+ end
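
JobLogger wraps every job execution, so its behaviour can be changed by swapping the class in. A hedged sketch, assuming the server-side :job_logger option that Sidekiq::Processor consults in this release; VerboseJobLogger is a made-up name:

    require 'sidekiq'
    require 'sidekiq/job_logger'

    # Adds the job class and queue to the default "start"/"done" lines.
    class VerboseJobLogger < Sidekiq::JobLogger
      def call(item, queue)
        Sidekiq.logger.info("running #{item['class']} from #{queue}")
        super # keeps the monotonic-clock timing from the parent class
      end
    end

    # In a server-side initializer:
    Sidekiq.options[:job_logger] = VerboseJobLogger
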

data/lib/sidekiq/job_retry.rb
@@ -0,0 +1,262 @@
+ # frozen_string_literal: true
+ require 'sidekiq/scheduled'
+ require 'sidekiq/api'
+
+ module Sidekiq
+   ##
+   # Automatically retry jobs that fail in Sidekiq.
+   # Sidekiq's retry support assumes a typical development lifecycle:
+   #
+   #   0. Push some code changes with a bug in it.
+   #   1. Bug causes job processing to fail, Sidekiq's middleware captures
+   #      the job and pushes it onto a retry queue.
+   #   2. Sidekiq retries jobs in the retry queue multiple times with
+   #      an exponential delay, the job continues to fail.
+   #   3. After a few days, a developer deploys a fix. The job is
+   #      reprocessed successfully.
+   #   4. Once retries are exhausted, Sidekiq will give up and move the
+   #      job to the Dead Job Queue (aka morgue) where it must be dealt with
+   #      manually in the Web UI.
+   #   5. After 6 months on the DJQ, Sidekiq will discard the job.
+   #
+   # A job looks like:
+   #
+   #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+   #
+   # The 'retry' option also accepts a number (in place of 'true'):
+   #
+   #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+   #
+   # The job will be retried this number of times before giving up. (If simply
+   # 'true', Sidekiq retries 25 times)
+   #
+   # We'll add a bit more data to the job to support retries:
+   #
+   # * 'queue' - the queue to use
+   # * 'retry_count' - number of times we've retried so far.
+   # * 'error_message' - the message from the exception
+   # * 'error_class' - the exception class
+   # * 'failed_at' - the first time it failed
+   # * 'retried_at' - the last time it was retried
+   # * 'backtrace' - the number of lines of error backtrace to store
+   #
+   # We don't store the backtrace by default as that can add a lot of overhead
+   # to the job and everyone is using an error service, right?
+   #
+   # The default number of retries is 25 which works out to about 3 weeks
+   # You can change the default maximum number of retries in your initializer:
+   #
+   #     Sidekiq.options[:max_retries] = 7
+   #
+   # or limit the number of retries for a particular worker with:
+   #
+   #     class MyWorker
+   #       include Sidekiq::Worker
+   #       sidekiq_options :retry => 10
+   #     end
+   #
+   class JobRetry
+     class Handled < ::RuntimeError; end
+     class Skip < Handled; end
+
+     include Sidekiq::Util
+
+     DEFAULT_MAX_RETRY_ATTEMPTS = 25
+
+     def initialize(options = {})
+       @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+     end
+
+     # The global retry handler requires only the barest of data.
+     # We want to be able to retry as much as possible so we don't
+     # require the worker to be instantiated.
+     def global(msg, queue)
+       yield
+     rescue Handled => ex
+       raise ex
+     rescue Sidekiq::Shutdown => ey
+       # ignore, will be pushed back onto queue during hard_shutdown
+       raise ey
+     rescue Exception => e
+       # ignore, will be pushed back onto queue during hard_shutdown
+       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
+
+       if msg['retry']
+         attempt_retry(nil, msg, queue, e)
+       else
+         Sidekiq.death_handlers.each do |handler|
+           begin
+             handler.call(msg, e)
+           rescue => handler_ex
+             handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
+           end
+         end
+       end
+
+       raise Handled
+     end
+
+
+     # The local retry support means that any errors that occur within
+     # this block can be associated with the given worker instance.
+     # This is required to support the `sidekiq_retries_exhausted` block.
+     #
+     # Note that any exception from the block is wrapped in the Skip
+     # exception so the global block does not reprocess the error. The
+     # Skip exception is unwrapped within Sidekiq::Processor#process before
+     # calling the handle_exception handlers.
+     def local(worker, msg, queue)
+       yield
+     rescue Handled => ex
+       raise ex
+     rescue Sidekiq::Shutdown => ey
+       # ignore, will be pushed back onto queue during hard_shutdown
+       raise ey
+     rescue Exception => e
+       # ignore, will be pushed back onto queue during hard_shutdown
+       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
+
+       if msg['retry'] == nil
+         msg['retry'] = worker.class.get_sidekiq_options['retry']
+       end
+
+       raise e unless msg['retry']
+       attempt_retry(worker, msg, queue, e)
+       # We've handled this error associated with this job, don't
+       # need to handle it at the global level
+       raise Skip
+     end
+
+     private
+
+     # Note that +worker+ can be nil here if an error is raised before we can
+     # instantiate the worker instance. All access must be guarded and
+     # best effort.
+     def attempt_retry(worker, msg, queue, exception)
+       max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+
+       msg['queue'] = if msg['retry_queue']
+         msg['retry_queue']
+       else
+         queue
+       end
+
+       m = exception_message(exception)
+       if m.respond_to?(:scrub!)
+         m.force_encoding("utf-8")
+         m.scrub!
+       end
+
+       msg['error_message'] = m
+       msg['error_class'] = exception.class.name
+       count = if msg['retry_count']
+         msg['retried_at'] = Time.now.to_f
+         msg['retry_count'] += 1
+       else
+         msg['failed_at'] = Time.now.to_f
+         msg['retry_count'] = 0
+       end
+
+       if msg['backtrace'] == true
+         msg['error_backtrace'] = exception.backtrace
+       elsif !msg['backtrace']
+         # do nothing
+       elsif msg['backtrace'].to_i != 0
+         msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+       end
+
+       if count < max_retry_attempts
+         delay = delay_for(worker, count, exception)
+         # Logging here can break retries if the logging device raises ENOSPC #3979
+         #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+         retry_at = Time.now.to_f + delay
+         payload = Sidekiq.dump_json(msg)
+         Sidekiq.redis do |conn|
+           conn.zadd('retry', retry_at.to_s, payload)
+         end
+       else
+         # Goodbye dear message, you (re)tried your best I'm sure.
+         retries_exhausted(worker, msg, exception)
+       end
+     end
+
+     def retries_exhausted(worker, msg, exception)
+       begin
+         block = worker && worker.sidekiq_retries_exhausted_block
+         block.call(msg, exception) if block
+       rescue => e
+         handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+       end
+
+       Sidekiq.death_handlers.each do |handler|
+         begin
+           handler.call(msg, exception)
+         rescue => e
+           handle_exception(e, { context: "Error calling death handler", job: msg })
+         end
+       end
+
+       send_to_morgue(msg) unless msg['dead'] == false
+     end
+
+     def send_to_morgue(msg)
+       logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+       payload = Sidekiq.dump_json(msg)
+       DeadSet.new.kill(payload, notify_failure: false)
+     end
+
+     def retry_attempts_from(msg_retry, default)
+       if msg_retry.is_a?(Integer)
+         msg_retry
+       else
+         default
+       end
+     end
+
+     def delay_for(worker, count, exception)
+       if worker && worker.sidekiq_retry_in_block
+         custom_retry_in = retry_in(worker, count, exception).to_i
+         return custom_retry_in if custom_retry_in > 0
+       end
+       seconds_to_delay(count)
+     end
+
+     # delayed_job uses the same basic formula
+     def seconds_to_delay(count)
+       (count ** 4) + 15 + (rand(30)*(count+1))
+     end
+
+     def retry_in(worker, count, exception)
+       begin
+         worker.sidekiq_retry_in_block.call(count, exception)
+       rescue Exception => e
+         handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
+         nil
+       end
+     end
+
+     def exception_caused_by_shutdown?(e, checked_causes = [])
+       return false unless e.cause
+
+       # Handle circular causes
+       checked_causes << e.object_id
+       return false if checked_causes.include?(e.cause.object_id)
+
+       e.cause.instance_of?(Sidekiq::Shutdown) ||
+         exception_caused_by_shutdown?(e.cause, checked_causes)
+     end
+
+     # Extract message from exception.
+     # Set a default if the message raises an error
+     def exception_message(exception)
+       begin
+         # App code can stuff all sorts of crazy binary data into the error message
+         # that won't convert to JSON.
+         exception.message.to_s[0, 10_000]
+       rescue
+         "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
+       end
+     end
+
+   end
+ end
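
The per-worker hooks referenced in the comments and used by delay_for and retries_exhausted look like this in application code. Note the default backoff, seconds_to_delay, grows roughly as count ** 4, which is why 25 retries stretch over about three weeks. A sketch with a hypothetical worker and made-up values:

    require 'sidekiq'

    class HardWorker
      include Sidekiq::Worker
      sidekiq_options retry: 5, backtrace: 20

      # Custom backoff; returning nil or 0 falls back to seconds_to_delay(count).
      sidekiq_retry_in { |count, exception| 10 * (count + 1) }

      # Runs once retries are exhausted, before the job is sent to the morgue.
      sidekiq_retries_exhausted do |msg, exception|
        Sidekiq.logger.warn("Giving up on #{msg['class']} #{msg['jid']}: #{exception.message}")
      end

      def perform(*args)
        # ...
      end
    end
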

data/lib/sidekiq/launcher.rb
@@ -0,0 +1,173 @@
+ # frozen_string_literal: true
+ require 'sidekiq/manager'
+ require 'sidekiq/fetch'
+ require 'sidekiq/scheduled'
+
+ module Sidekiq
+   # The Launcher is a very simple Actor whose job is to
+   # start, monitor and stop the core Actors in Sidekiq.
+   # If any of these actors die, the Sidekiq process exits
+   # immediately.
+   class Launcher
+     include Util
+
+     attr_accessor :manager, :poller, :fetcher
+
+     STATS_TTL = 5*365*24*60*60
+
+     def initialize(options)
+       @manager = Sidekiq::Manager.new(options)
+       @poller = Sidekiq::Scheduled::Poller.new
+       @done = false
+       @options = options
+     end
+
+     def run
+       @thread = safe_thread("heartbeat", &method(:start_heartbeat))
+       @poller.start
+       @manager.start
+     end
+
+     # Stops this instance from processing any more jobs,
+     #
+     def quiet
+       @done = true
+       @manager.quiet
+       @poller.terminate
+     end
+
+     # Shuts down the process. This method does not
+     # return until all work is complete and cleaned up.
+     # It can take up to the timeout to complete.
+     def stop
+       deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
+
+       @done = true
+       @manager.quiet
+       @poller.terminate
+
+       @manager.stop(deadline)
+
+       # Requeue everything in case there was a worker who grabbed work while stopped
+       # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
+       strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+       strategy.bulk_requeue([], @options)
+
+       clear_heartbeat
+     end
+
+     def stopping?
+       @done
+     end
+
+     private unless $TESTING
+
+     def heartbeat
+       results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
+       results.compact!
+       $0 = results.join(' ')
+
+       ❤
+     end
+
+     def ❤
+       key = identity
+       fails = procd = 0
+       begin
+         fails = Processor::FAILURE.reset
+         procd = Processor::PROCESSED.reset
+         curstate = Processor::WORKER_STATE.dup
+
+         workers_key = "#{key}:workers"
+         nowdate = Time.now.utc.strftime("%Y-%m-%d")
+         Sidekiq.redis do |conn|
+           conn.multi do
+             conn.incrby("stat:processed", procd)
+             conn.incrby("stat:processed:#{nowdate}", procd)
+             conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+             conn.incrby("stat:failed", fails)
+             conn.incrby("stat:failed:#{nowdate}", fails)
+             conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+
+             conn.del(workers_key)
+             curstate.each_pair do |tid, hash|
+               conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
+             end
+             conn.expire(workers_key, 60)
+           end
+         end
+         fails = procd = 0
+
+         _, exists, _, _, msg = Sidekiq.redis do |conn|
+           conn.multi do
+             conn.sadd('processes', key)
+             conn.exists(key)
+             conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
+             conn.expire(key, 60)
+             conn.rpop("#{key}-signals")
+           end
+         end
+
+         # first heartbeat or recovering from an outage and need to reestablish our heartbeat
+         fire_event(:heartbeat) if !exists
+
+         return unless msg
+
+         ::Process.kill(msg, $$)
+       rescue => e
+         # ignore all redis/network issues
+         logger.error("heartbeat: #{e.message}")
+         # don't lose the counts if there was a network issue
+         Processor::PROCESSED.incr(procd)
+         Processor::FAILURE.incr(fails)
+       end
+     end
+
+     def start_heartbeat
+       while true
+         heartbeat
+         sleep 5
+       end
+       Sidekiq.logger.info("Heartbeat stopping...")
+     end
+
+     def to_data
+       @data ||= begin
+         {
+           'hostname' => hostname,
+           'started_at' => Time.now.to_f,
+           'pid' => $$,
+           'tag' => @options[:tag] || '',
+           'concurrency' => @options[:concurrency],
+           'queues' => @options[:queues].uniq,
+           'labels' => @options[:labels],
+           'identity' => identity,
+         }
+       end
+     end
+
+     def to_json
+       @json ||= begin
+         # this data changes infrequently so dump it to a string
+         # now so we don't need to dump it every heartbeat.
+         Sidekiq.dump_json(to_data)
+       end
+     end
+
+     def clear_heartbeat
+       # Remove record from Redis since we are shutting down.
+       # Note we don't stop the heartbeat thread; if the process
+       # doesn't actually exit, it'll reappear in the Web UI.
+       Sidekiq.redis do |conn|
+         conn.pipelined do
+           conn.srem('processes', identity)
+           conn.del("#{identity}:workers")
+         end
+       end
+     rescue
+       # best effort, ignore network errors
+     end
+
+   end
+ end
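
The per-process hash written by the ❤ method is what feeds the Busy page in the Web UI, and it can be read back with the API shipped in this gem. A minimal sketch using Sidekiq::ProcessSet from data/lib/sidekiq/api.rb, run against the same Redis the workers use:

    require 'sidekiq/api'

    # Iterates the 'processes' set and each per-process heartbeat hash.
    Sidekiq::ProcessSet.new.each do |process|
      puts format("%s busy=%d beat=%s quiet=%s",
                  process['identity'], process['busy'],
                  Time.at(process['beat']).utc, process['quiet'])
    end
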