sidekiq 5.2.8 → 6.0.0

Potentially problematic release.

This version of sidekiq might be problematic.

Files changed (61)
  1. checksums.yaml +4 -4
  2. data/.gitignore +0 -2
  3. data/.standard.yml +20 -0
  4. data/6.0-Upgrade.md +70 -0
  5. data/Changes.md +31 -3
  6. data/Ent-2.0-Upgrade.md +37 -0
  7. data/Ent-Changes.md +12 -0
  8. data/Gemfile +12 -11
  9. data/Gemfile.lock +196 -0
  10. data/Pro-5.0-Upgrade.md +25 -0
  11. data/Pro-Changes.md +12 -3
  12. data/README.md +16 -30
  13. data/Rakefile +5 -4
  14. data/bin/sidekiqload +26 -22
  15. data/bin/sidekiqmon +9 -0
  16. data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
  17. data/lib/generators/sidekiq/worker_generator.rb +12 -14
  18. data/lib/sidekiq/api.rb +138 -151
  19. data/lib/sidekiq/cli.rb +97 -162
  20. data/lib/sidekiq/client.rb +45 -46
  21. data/lib/sidekiq/delay.rb +5 -6
  22. data/lib/sidekiq/exception_handler.rb +10 -12
  23. data/lib/sidekiq/extensions/action_mailer.rb +10 -20
  24. data/lib/sidekiq/extensions/active_record.rb +9 -7
  25. data/lib/sidekiq/extensions/class_methods.rb +9 -7
  26. data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
  27. data/lib/sidekiq/fetch.rb +5 -6
  28. data/lib/sidekiq/job_logger.rb +37 -7
  29. data/lib/sidekiq/job_retry.rb +45 -58
  30. data/lib/sidekiq/launcher.rb +59 -51
  31. data/lib/sidekiq/logger.rb +69 -0
  32. data/lib/sidekiq/manager.rb +7 -9
  33. data/lib/sidekiq/middleware/chain.rb +3 -2
  34. data/lib/sidekiq/middleware/i18n.rb +5 -7
  35. data/lib/sidekiq/monitor.rb +148 -0
  36. data/lib/sidekiq/paginator.rb +11 -12
  37. data/lib/sidekiq/processor.rb +52 -49
  38. data/lib/sidekiq/rails.rb +23 -29
  39. data/lib/sidekiq/redis_connection.rb +31 -37
  40. data/lib/sidekiq/scheduled.rb +17 -19
  41. data/lib/sidekiq/testing/inline.rb +2 -1
  42. data/lib/sidekiq/testing.rb +22 -23
  43. data/lib/sidekiq/util.rb +17 -14
  44. data/lib/sidekiq/version.rb +2 -1
  45. data/lib/sidekiq/web/action.rb +14 -10
  46. data/lib/sidekiq/web/application.rb +60 -57
  47. data/lib/sidekiq/web/helpers.rb +66 -67
  48. data/lib/sidekiq/web/router.rb +17 -14
  49. data/lib/sidekiq/web.rb +41 -49
  50. data/lib/sidekiq/worker.rb +124 -97
  51. data/lib/sidekiq.rb +53 -42
  52. data/sidekiq.gemspec +16 -16
  53. data/web/assets/javascripts/dashboard.js +2 -21
  54. data/web/locales/ja.yml +2 -1
  55. metadata +21 -31
  56. data/.travis.yml +0 -11
  57. data/bin/sidekiqctl +0 -20
  58. data/lib/sidekiq/core_ext.rb +0 -1
  59. data/lib/sidekiq/ctl.rb +0 -221
  60. data/lib/sidekiq/logging.rb +0 -122
  61. data/lib/sidekiq/middleware/server/active_record.rb +0 -23

data/lib/sidekiq/exception_handler.rb CHANGED
@@ -1,12 +1,12 @@
 # frozen_string_literal: true
-require 'sidekiq'
+
+require "sidekiq"
 
 module Sidekiq
   module ExceptionHandler
-
     class Logger
-      def call(ex, ctxHash)
-        Sidekiq.logger.warn(Sidekiq.dump_json(ctxHash)) if !ctxHash.empty?
+      def call(ex, ctx)
+        Sidekiq.logger.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
         Sidekiq.logger.warn("#{ex.class.name}: #{ex.message}")
         Sidekiq.logger.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil?
       end
@@ -14,15 +14,13 @@ module Sidekiq
       Sidekiq.error_handlers << Sidekiq::ExceptionHandler::Logger.new
     end
 
-    def handle_exception(ex, ctxHash={})
+    def handle_exception(ex, ctx = {})
       Sidekiq.error_handlers.each do |handler|
-        begin
-          handler.call(ex, ctxHash)
-        rescue => ex
-          Sidekiq.logger.error "!!! ERROR HANDLER THREW AN ERROR !!!"
-          Sidekiq.logger.error ex
-          Sidekiq.logger.error ex.backtrace.join("\n") unless ex.backtrace.nil?
-        end
+        handler.call(ex, ctx)
+      rescue => ex
+        Sidekiq.logger.error "!!! ERROR HANDLER THREW AN ERROR !!!"
+        Sidekiq.logger.error ex
+        Sidekiq.logger.error ex.backtrace.join("\n") unless ex.backtrace.nil?
       end
     end
   end

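The handle_exception change above passes an arbitrary context Hash to every registered handler and uses Ruby 2.5's block-level rescue instead of an inner begin/end. A minimal sketch of a custom handler wired into this hook, assuming only the (ex, ctx) signature shown in the diff; MyErrorTracker is a hypothetical stand-in for an error-reporting client:

    require "sidekiq"

    # Handlers are plain callables appended to Sidekiq.error_handlers;
    # handle_exception invokes each one with the exception and a context Hash
    # (for example the job payload), rescuing per handler as shown above.
    Sidekiq.error_handlers << ->(ex, ctx) {
      MyErrorTracker.notify(ex, metadata: ctx) # hypothetical error-reporting client
    }
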
data/lib/sidekiq/extensions/action_mailer.rb CHANGED
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
-require 'sidekiq/extensions/generic_proxy'
+
+require "sidekiq/extensions/generic_proxy"
 
 module Sidekiq
   module Extensions
@@ -19,39 +20,28 @@ module Sidekiq
         # The email method can return nil, which causes ActionMailer to return
         # an undeliverable empty message.
         if msg
-          deliver(msg)
-        else
-          raise "#{target.name}##{method_name} returned an undeliverable mail object"
-        end
-      end
-
-      private
-
-      def deliver(msg)
-        if msg.respond_to?(:deliver_now)
-          # Rails 4.2/5.0
           msg.deliver_now
         else
-          # Rails 3.2/4.0/4.1
-          msg.deliver
+          raise "#{target.name}##{method_name} returned an undeliverable mail object"
         end
       end
     end
 
     module ActionMailer
-      def sidekiq_delay(options={})
+      def sidekiq_delay(options = {})
         Proxy.new(DelayedMailer, self, options)
       end
-      def sidekiq_delay_for(interval, options={})
-        Proxy.new(DelayedMailer, self, options.merge('at' => Time.now.to_f + interval.to_f))
+
+      def sidekiq_delay_for(interval, options = {})
+        Proxy.new(DelayedMailer, self, options.merge("at" => Time.now.to_f + interval.to_f))
       end
-      def sidekiq_delay_until(timestamp, options={})
-        Proxy.new(DelayedMailer, self, options.merge('at' => timestamp.to_f))
+
+      def sidekiq_delay_until(timestamp, options = {})
+        Proxy.new(DelayedMailer, self, options.merge("at" => timestamp.to_f))
      end
       alias_method :delay, :sidekiq_delay
       alias_method :delay_for, :sidekiq_delay_for
       alias_method :delay_until, :sidekiq_delay_until
     end
-
   end
 end

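These proxies back the optional delay extensions. A hedged usage sketch, assuming the extensions have been enabled (they are opt-in; Sidekiq::Extensions.enable_delay! lives in the data/lib/sidekiq/delay.rb listed above) and that UserMailer#welcome_email exists in the application:

    require "sidekiq"

    Sidekiq::Extensions.enable_delay! # the delay extensions are opt-in

    # Enqueues a Sidekiq::Extensions::DelayedMailer job through the Proxy above;
    # delay_for stores "at" => Time.now.to_f + interval, so delivery happens
    # roughly five minutes from now. UserMailer/welcome_email are hypothetical.
    UserMailer.delay_for(5 * 60).welcome_email(42)
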
data/lib/sidekiq/extensions/active_record.rb CHANGED
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
-require 'sidekiq/extensions/generic_proxy'
+
+require "sidekiq/extensions/generic_proxy"
 
 module Sidekiq
   module Extensions
@@ -22,19 +23,20 @@ module Sidekiq
     end
 
     module ActiveRecord
-      def sidekiq_delay(options={})
+      def sidekiq_delay(options = {})
         Proxy.new(DelayedModel, self, options)
       end
-      def sidekiq_delay_for(interval, options={})
-        Proxy.new(DelayedModel, self, options.merge('at' => Time.now.to_f + interval.to_f))
+
+      def sidekiq_delay_for(interval, options = {})
+        Proxy.new(DelayedModel, self, options.merge("at" => Time.now.to_f + interval.to_f))
       end
-      def sidekiq_delay_until(timestamp, options={})
-        Proxy.new(DelayedModel, self, options.merge('at' => timestamp.to_f))
+
+      def sidekiq_delay_until(timestamp, options = {})
+        Proxy.new(DelayedModel, self, options.merge("at" => timestamp.to_f))
       end
       alias_method :delay, :sidekiq_delay
       alias_method :delay_for, :sidekiq_delay_for
       alias_method :delay_until, :sidekiq_delay_until
     end
-
   end
 end

data/lib/sidekiq/extensions/class_methods.rb CHANGED
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
-require 'sidekiq/extensions/generic_proxy'
+
+require "sidekiq/extensions/generic_proxy"
 
 module Sidekiq
   module Extensions
@@ -20,20 +21,21 @@ module Sidekiq
     end
 
     module Klass
-      def sidekiq_delay(options={})
+      def sidekiq_delay(options = {})
         Proxy.new(DelayedClass, self, options)
       end
-      def sidekiq_delay_for(interval, options={})
-        Proxy.new(DelayedClass, self, options.merge('at' => Time.now.to_f + interval.to_f))
+
+      def sidekiq_delay_for(interval, options = {})
+        Proxy.new(DelayedClass, self, options.merge("at" => Time.now.to_f + interval.to_f))
      end
-      def sidekiq_delay_until(timestamp, options={})
-        Proxy.new(DelayedClass, self, options.merge('at' => timestamp.to_f))
+
+      def sidekiq_delay_until(timestamp, options = {})
+        Proxy.new(DelayedClass, self, options.merge("at" => timestamp.to_f))
       end
       alias_method :delay, :sidekiq_delay
       alias_method :delay_for, :sidekiq_delay_for
       alias_method :delay_until, :sidekiq_delay_until
     end
-
   end
 end
 
data/lib/sidekiq/extensions/generic_proxy.rb CHANGED
@@ -1,12 +1,13 @@
 # frozen_string_literal: true
-require 'yaml'
+
+require "yaml"
 
 module Sidekiq
   module Extensions
     SIZE_LIMIT = 8_192
 
     class Proxy < BasicObject
-      def initialize(performable, target, options={})
+      def initialize(performable, target, options = {})
         @performable = performable
         @target = target
         @opts = options
@@ -23,9 +24,8 @@ module Sidekiq
         if marshalled.size > SIZE_LIMIT
           ::Sidekiq.logger.warn { "#{@target}.#{name} job argument is #{marshalled.bytesize} bytes, you should refactor it to reduce the size" }
         end
-        @performable.client_push({ 'class' => @performable, 'args' => [marshalled] }.merge(@opts))
+        @performable.client_push({"class" => @performable, "args" => [marshalled]}.merge(@opts))
       end
     end
-
   end
 end

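The Proxy serializes the deferred call with YAML and warns when the resulting single job argument exceeds SIZE_LIMIT (8_192 bytes). A rough, standalone illustration of that size check (the dumped structure is roughly the target, method name and args):

    require "yaml"

    SIZE_LIMIT = 8_192 # mirrors Sidekiq::Extensions::SIZE_LIMIT above

    # A large argument blows past the limit and would trigger the logger
    # warning in Proxy#method_missing.
    marshalled = YAML.dump(["SomeClass", :welcome_email, ["x" * 10_000]])
    puts "#{marshalled.bytesize} bytes, over limit: #{marshalled.size > SIZE_LIMIT}"
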
data/lib/sidekiq/fetch.rb CHANGED
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
-require 'sidekiq'
+
+require "sidekiq"
 
 module Sidekiq
   class BasicFetch
@@ -7,13 +8,13 @@ module Sidekiq
     # can check if the process is shutting down.
     TIMEOUT = 2
 
-    UnitOfWork = Struct.new(:queue, :job) do
+    UnitOfWork = Struct.new(:queue, :job) {
       def acknowledge
         # nothing to do
       end
 
       def queue_name
-        queue.sub(/.*queue:/, '')
+        queue.sub(/.*queue:/, "")
       end
 
       def requeue
@@ -21,7 +22,7 @@ module Sidekiq
           conn.rpush("queue:#{queue_name}", job)
         end
       end
-    end
+    }
 
     def initialize(options)
       @strictly_ordered_queues = !!options[:strict]
@@ -52,7 +53,6 @@ module Sidekiq
       end
     end
 
-
     # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
     # an instance method will make it async to the Fetcher actor
     def self.bulk_requeue(inprogress, options)
@@ -76,6 +76,5 @@ module Sidekiq
     rescue => ex
       Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
     end
-
   end
 end

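The comment kept above notes that bulk_requeue stays a class method so the fetch strategy remains pluggable. A heavily hedged sketch of what an alternative strategy could look like if it mirrors BasicFetch's surface (retrieve_work returning a unit of work that responds to acknowledge/requeue, plus a bulk_requeue class method); how a custom fetcher is actually wired into Sidekiq is not shown in this diff and is left out here:

    require "sidekiq"

    # Hypothetical single-queue fetch strategy mirroring BasicFetch's interface.
    class SingleQueueFetch
      UnitOfWork = Struct.new(:queue, :job) {
        def acknowledge; end

        def queue_name
          queue.sub(/.*queue:/, "")
        end

        def requeue
          Sidekiq.redis { |conn| conn.rpush("queue:#{queue_name}", job) }
        end
      }

      def initialize(options)
        @queue = "queue:#{options.fetch(:queues, ["default"]).first}"
      end

      def retrieve_work
        work = Sidekiq.redis { |conn| conn.brpop(@queue, 2) }
        UnitOfWork.new(*work) if work
      end

      def self.bulk_requeue(inprogress, options)
        inprogress.each(&:requeue)
      end
    end
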
data/lib/sidekiq/job_logger.rb CHANGED
@@ -1,25 +1,55 @@
 # frozen_string_literal: true
+
 module Sidekiq
   class JobLogger
+    def initialize(logger = Sidekiq.logger)
+      @logger = logger
+    end
 
     def call(item, queue)
       start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-      logger.info("start")
+      @logger.info("start")
+
       yield
-      logger.info("done: #{elapsed(start)} sec")
+
+      with_elapsed_time_context(start) do
+        @logger.info("done")
+      end
     rescue Exception
-      logger.info("fail: #{elapsed(start)} sec")
+      with_elapsed_time_context(start) do
+        @logger.info("fail")
+      end
+
       raise
     end
 
+    def with_job_hash_context(job_hash, &block)
+      @logger.with_context(job_hash_context(job_hash), &block)
+    end
+
+    def job_hash_context(job_hash)
+      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+      # attribute to expose the underlying thing.
+      h = {
+        class: job_hash["wrapped"] || job_hash["class"],
+        jid: job_hash["jid"],
+      }
+      h[:bid] = job_hash["bid"] if job_hash["bid"]
+      h
+    end
+
+    def with_elapsed_time_context(start, &block)
+      @logger.with_context(elapsed_time_context(start), &block)
+    end
+
+    def elapsed_time_context(start)
+      {elapsed: elapsed(start).to_s}
+    end
+
     private
 
     def elapsed(start)
       (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
     end
-
-    def logger
-      Sidekiq.logger
-    end
   end
 end

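JobLogger now takes an injected logger and attaches job metadata (class, jid, optional bid) and elapsed time as logging context instead of embedding the timing in the message text. A small usage sketch, assuming the Sidekiq::Logger added in this release (data/lib/sidekiq/logger.rb above) and a hypothetical job payload:

    require "sidekiq"
    require "sidekiq/job_logger"

    logger = Sidekiq::Logger.new($stdout)       # structured logger new in 6.0
    job_logger = Sidekiq::JobLogger.new(logger)

    job_hash = {"class" => "HardWorker", "jid" => "abc123def456"} # hypothetical payload

    # with_job_hash_context adds class/jid to every line logged in the block;
    # call logs "start", then "done" (or "fail") with an elapsed-time context.
    job_logger.with_job_hash_context(job_hash) do
      job_logger.call(job_hash, "default") do
        sleep 0.1 # stand-in for Worker#perform
      end
    end
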
data/lib/sidekiq/job_retry.rb CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
+
+require "sidekiq/scheduled"
+require "sidekiq/api"
 
 module Sidekiq
   ##
@@ -81,22 +82,19 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry']
+      if msg["retry"]
         attempt_retry(nil, msg, queue, e)
       else
         Sidekiq.death_handlers.each do |handler|
-          begin
-            handler.call(msg, e)
-          rescue => handler_ex
-            handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
-          end
+          handler.call(msg, e)
+        rescue => handler_ex
+          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
         end
       end
 
       raise Handled
     end
 
-
     # The local retry support means that any errors that occur within
     # this block can be associated with the given worker instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
@@ -116,11 +114,11 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry'] == nil
-        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      if msg["retry"].nil?
+        msg["retry"] = worker.class.get_sidekiq_options["retry"]
       end
 
-      raise e unless msg['retry']
+      raise e unless msg["retry"]
       attempt_retry(worker, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
@@ -133,13 +131,9 @@ module Sidekiq
     # instantiate the worker instance. All access must be guarded and
     # best effort.
     def attempt_retry(worker, msg, queue, exception)
-      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
-      msg['queue'] = if msg['retry_queue']
-        msg['retry_queue']
-      else
-        queue
-      end
+      msg["queue"] = (msg["retry_queue"] || queue)
 
       m = exception_message(exception)
       if m.respond_to?(:scrub!)
@@ -147,32 +141,32 @@ module Sidekiq
         m.scrub!
       end
 
-      msg['error_message'] = m
-      msg['error_class'] = exception.class.name
-      count = if msg['retry_count']
-        msg['retried_at'] = Time.now.to_f
-        msg['retry_count'] += 1
+      msg["error_message"] = m
+      msg["error_class"] = exception.class.name
+      count = if msg["retry_count"]
+        msg["retried_at"] = Time.now.to_f
+        msg["retry_count"] += 1
       else
-        msg['failed_at'] = Time.now.to_f
-        msg['retry_count'] = 0
+        msg["failed_at"] = Time.now.to_f
+        msg["retry_count"] = 0
       end
 
-      if msg['backtrace'] == true
-        msg['error_backtrace'] = exception.backtrace
-      elsif !msg['backtrace']
+      if msg["backtrace"] == true
+        msg["error_backtrace"] = exception.backtrace
+      elsif !msg["backtrace"]
         # do nothing
-      elsif msg['backtrace'].to_i != 0
-        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      elsif msg["backtrace"].to_i != 0
+        msg["error_backtrace"] = exception.backtrace[0...msg["backtrace"].to_i]
       end
 
       if count < max_retry_attempts
         delay = delay_for(worker, count, exception)
         # Logging here can break retries if the logging device raises ENOSPC #3979
-        #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
         payload = Sidekiq.dump_json(msg)
         Sidekiq.redis do |conn|
-          conn.zadd('retry', retry_at.to_s, payload)
+          conn.zadd("retry", retry_at.to_s, payload)
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
@@ -182,25 +176,23 @@ module Sidekiq
 
     def retries_exhausted(worker, msg, exception)
       begin
-        block = worker && worker.sidekiq_retries_exhausted_block
-        block.call(msg, exception) if block
+        block = worker&.sidekiq_retries_exhausted_block
+        block&.call(msg, exception)
       rescue => e
-        handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
       Sidekiq.death_handlers.each do |handler|
-        begin
-          handler.call(msg, exception)
-        rescue => e
-          handle_exception(e, { context: "Error calling death handler", job: msg })
-        end
+        handler.call(msg, exception)
+      rescue => e
+        handle_exception(e, {context: "Error calling death handler", job: msg})
       end
 
-      send_to_morgue(msg) unless msg['dead'] == false
+      send_to_morgue(msg) unless msg["dead"] == false
     end
 
     def send_to_morgue(msg)
-      logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
       DeadSet.new.kill(payload, notify_failure: false)
     end
@@ -214,7 +206,7 @@ module Sidekiq
     end
 
     def delay_for(worker, count, exception)
-      if worker && worker.sidekiq_retry_in_block
+      if worker&.sidekiq_retry_in_block
         custom_retry_in = retry_in(worker, count, exception).to_i
         return custom_retry_in if custom_retry_in > 0
       end
@@ -223,16 +215,14 @@ module Sidekiq
 
     # delayed_job uses the same basic formula
     def seconds_to_delay(count)
-      (count ** 4) + 15 + (rand(30)*(count+1))
+      (count**4) + 15 + (rand(30) * (count + 1))
     end
 
     def retry_in(worker, count, exception)
-      begin
-        worker.sidekiq_retry_in_block.call(count, exception)
-      rescue Exception => e
-        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-        nil
-      end
+      worker.sidekiq_retry_in_block.call(count, exception)
+    rescue Exception => e
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      nil
     end
 
     def exception_caused_by_shutdown?(e, checked_causes = [])
@@ -249,14 +239,11 @@ module Sidekiq
     # Extract message from exception.
     # Set a default if the message raises an error
     def exception_message(exception)
-      begin
-        # App code can stuff all sorts of crazy binary data into the error message
-        # that won't convert to JSON.
-        exception.message.to_s[0, 10_000]
-      rescue
-        "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
-      end
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      exception.message.to_s[0, 10_000]
+    rescue
+      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
     end
-
   end
 end

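The retry paths above consult per-worker hooks: delay_for calls a worker's sidekiq_retry_in block, and retries_exhausted calls its sidekiq_retries_exhausted block before send_to_morgue. A hedged sketch of a worker defining both (HardWorker is hypothetical):

    require "sidekiq"

    class HardWorker # hypothetical worker
      include Sidekiq::Worker
      sidekiq_options retry: 5, backtrace: 20 # "backtrace" feeds the error_backtrace handling above

      # Receives (count, exception) per retry_in above; returns seconds until the next attempt.
      sidekiq_retry_in { |count, exception| (count + 1) * 60 }

      # Receives the job hash and the exception once max_retry_attempts is exceeded.
      sidekiq_retries_exhausted do |msg, exception|
        Sidekiq.logger.warn("Giving up on #{msg["class"]} #{msg["jid"]}: #{exception.message}")
      end

      def perform(name)
        raise "simulated failure for #{name}"
      end
    end
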
data/lib/sidekiq/launcher.rb CHANGED
@@ -1,19 +1,25 @@
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/fetch"
+require "sidekiq/scheduled"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
   class Launcher
     include Util
 
-    attr_accessor :manager, :poller, :fetcher
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
-    STATS_TTL = 5*365*24*60*60
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "stopping" if me.stopping? },
+    ]
+
+    attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
       @manager = Sidekiq::Manager.new(options)
@@ -62,10 +68,30 @@ module Sidekiq
 
     private unless $TESTING
 
+    def start_heartbeat
+      loop do
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem("processes", identity)
+          conn.del("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
     def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
 
       ❤
     end
@@ -73,6 +99,7 @@ module Sidekiq
     def ❤
       key = identity
       fails = procd = 0
+
       begin
         fails = Processor::FAILURE.reset
         procd = Processor::PROCESSED.reset
@@ -80,6 +107,7 @@ module Sidekiq
 
         workers_key = "#{key}:workers"
         nowdate = Time.now.utc.strftime("%Y-%m-%d")
+
         Sidekiq.redis do |conn|
           conn.multi do
             conn.incrby("stat:processed", procd)
@@ -97,24 +125,27 @@ module Sidekiq
             conn.expire(workers_key, 60)
           end
         end
+
         fails = procd = 0
 
-        _, exists, _, _, msg = Sidekiq.redis do |conn|
-          conn.multi do
-            conn.sadd('processes', key)
+        _, exists, _, _, msg = Sidekiq.redis { |conn|
+          res = conn.multi {
+            conn.sadd("processes", key)
             conn.exists(key)
-            conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
+            conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
             conn.expire(key, 60)
             conn.rpop("#{key}-signals")
-          end
-        end
+          }
+
+          res
+        }
 
         # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-        fire_event(:heartbeat) if !exists
+        fire_event(:heartbeat) unless exists
 
         return unless msg
 
-        ::Process.kill(msg, $$)
+        ::Process.kill(msg, ::Process.pid)
       rescue => e
         # ignore all redis/network issues
         logger.error("heartbeat: #{e.message}")
@@ -124,25 +155,17 @@ module Sidekiq
       end
     end
 
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
-      end
-      Sidekiq.logger.info("Heartbeat stopping...")
-    end
-
     def to_data
       @data ||= begin
        {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
+          "hostname" => hostname,
+          "started_at" => Time.now.to_f,
+          "pid" => ::Process.pid,
+          "tag" => @options[:tag] || "",
+          "concurrency" => @options[:concurrency],
+          "queues" => @options[:queues].uniq,
+          "labels" => @options[:labels],
+          "identity" => identity,
        }
      end
    end
@@ -154,20 +177,5 @@ module Sidekiq
         Sidekiq.dump_json(to_data)
       end
     end
-
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
-    end
-
   end
 end
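
The heartbeat above now builds the process title from Launcher::PROCTITLES (previously Sidekiq::CLI::PROCTITLES): each proc receives the launcher and its to_data hash, nil results are dropped, and the parts are joined into $0. A rough illustration with stand-in values (fake_launcher and the data hash are hypothetical):

    require "sidekiq/launcher"

    # Stand-in object answering the one message the last proc sends the launcher.
    fake_launcher = Object.new
    def fake_launcher.stopping?
      false
    end

    data = {"tag" => "myapp", "concurrency" => 10} # subset of Launcher#to_data

    parts = Sidekiq::Launcher::PROCTITLES.map { |p| p.call(fake_launcher, data) }
    $0 = parts.compact.join(" ") # => something like "sidekiq 6.0.0 myapp [0 of 10 busy]"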