sidekiq 5.2.4 → 6.0.1

Potentially problematic release.

Files changed (78)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +82 -0
  3. data/.gitignore +0 -2
  4. data/.standard.yml +20 -0
  5. data/6.0-Upgrade.md +72 -0
  6. data/COMM-LICENSE +11 -9
  7. data/Changes.md +129 -0
  8. data/Ent-2.0-Upgrade.md +37 -0
  9. data/Ent-Changes.md +32 -1
  10. data/Gemfile +12 -17
  11. data/Gemfile.lock +196 -0
  12. data/Pro-5.0-Upgrade.md +25 -0
  13. data/Pro-Changes.md +26 -2
  14. data/README.md +18 -31
  15. data/Rakefile +5 -4
  16. data/bin/sidekiqload +33 -25
  17. data/bin/sidekiqmon +8 -0
  18. data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
  19. data/lib/generators/sidekiq/worker_generator.rb +20 -12
  20. data/lib/sidekiq.rb +62 -43
  21. data/lib/sidekiq/api.rb +196 -175
  22. data/lib/sidekiq/cli.rb +118 -178
  23. data/lib/sidekiq/client.rb +51 -46
  24. data/lib/sidekiq/delay.rb +5 -6
  25. data/lib/sidekiq/exception_handler.rb +10 -12
  26. data/lib/sidekiq/extensions/action_mailer.rb +10 -20
  27. data/lib/sidekiq/extensions/active_record.rb +9 -7
  28. data/lib/sidekiq/extensions/class_methods.rb +9 -7
  29. data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
  30. data/lib/sidekiq/fetch.rb +11 -12
  31. data/lib/sidekiq/job_logger.rb +45 -7
  32. data/lib/sidekiq/job_retry.rb +67 -58
  33. data/lib/sidekiq/launcher.rb +57 -51
  34. data/lib/sidekiq/logger.rb +165 -0
  35. data/lib/sidekiq/manager.rb +7 -9
  36. data/lib/sidekiq/middleware/chain.rb +14 -4
  37. data/lib/sidekiq/middleware/i18n.rb +5 -7
  38. data/lib/sidekiq/monitor.rb +148 -0
  39. data/lib/sidekiq/paginator.rb +18 -14
  40. data/lib/sidekiq/processor.rb +96 -66
  41. data/lib/sidekiq/rails.rb +23 -29
  42. data/lib/sidekiq/redis_connection.rb +31 -37
  43. data/lib/sidekiq/scheduled.rb +28 -29
  44. data/lib/sidekiq/testing.rb +34 -23
  45. data/lib/sidekiq/testing/inline.rb +2 -1
  46. data/lib/sidekiq/util.rb +17 -14
  47. data/lib/sidekiq/version.rb +2 -1
  48. data/lib/sidekiq/web.rb +41 -49
  49. data/lib/sidekiq/web/action.rb +14 -10
  50. data/lib/sidekiq/web/application.rb +63 -64
  51. data/lib/sidekiq/web/helpers.rb +92 -68
  52. data/lib/sidekiq/web/router.rb +17 -14
  53. data/lib/sidekiq/worker.rb +129 -97
  54. data/sidekiq.gemspec +16 -16
  55. data/web/assets/javascripts/dashboard.js +4 -23
  56. data/web/assets/stylesheets/application-dark.css +125 -0
  57. data/web/assets/stylesheets/application.css +9 -0
  58. data/web/assets/stylesheets/bootstrap.css +1 -1
  59. data/web/locales/ja.yml +2 -1
  60. data/web/views/_job_info.erb +2 -1
  61. data/web/views/busy.erb +4 -1
  62. data/web/views/dead.erb +2 -2
  63. data/web/views/layout.erb +1 -0
  64. data/web/views/morgue.erb +4 -1
  65. data/web/views/queue.erb +10 -1
  66. data/web/views/queues.erb +1 -1
  67. data/web/views/retries.erb +4 -1
  68. data/web/views/retry.erb +2 -2
  69. data/web/views/scheduled.erb +4 -1
  70. metadata +20 -30
  71. data/.travis.yml +0 -17
  72. data/Appraisals +0 -9
  73. data/bin/sidekiqctl +0 -237
  74. data/gemfiles/rails_4.gemfile +0 -31
  75. data/gemfiles/rails_5.gemfile +0 -31
  76. data/lib/sidekiq/core_ext.rb +0 -1
  77. data/lib/sidekiq/logging.rb +0 -122
  78. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
data/lib/sidekiq/job_logger.rb
@@ -1,25 +1,63 @@
 # frozen_string_literal: true
+
 module Sidekiq
   class JobLogger
+    def initialize(logger = Sidekiq.logger)
+      @logger = logger
+    end
 
     def call(item, queue)
       start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-      logger.info("start")
+      @logger.info("start")
+
       yield
-      logger.info("done: #{elapsed(start)} sec")
+
+      with_elapsed_time_context(start) do
+        @logger.info("done")
+      end
     rescue Exception
-      logger.info("fail: #{elapsed(start)} sec")
+      with_elapsed_time_context(start) do
+        @logger.info("fail")
+      end
+
       raise
     end
 
+    def prepare(job_hash, &block)
+      level = job_hash["log_level"]
+      if level
+        @logger.log_at(level) do
+          Sidekiq::Context.with(job_hash_context(job_hash), &block)
+        end
+      else
+        Sidekiq::Context.with(job_hash_context(job_hash), &block)
+      end
+    end
+
+    def job_hash_context(job_hash)
+      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+      # attribute to expose the underlying thing.
+      h = {
+        class: job_hash["wrapped"] || job_hash["class"],
+        jid: job_hash["jid"],
+      }
+      h[:bid] = job_hash["bid"] if job_hash["bid"]
+      h[:tags] = job_hash["tags"] if job_hash["tags"]
+      h
+    end
+
+    def with_elapsed_time_context(start, &block)
+      Sidekiq::Context.with(elapsed_time_context(start), &block)
+    end
+
+    def elapsed_time_context(start)
+      {elapsed: elapsed(start).to_s}
+    end
+
     private
 
     def elapsed(start)
       (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
     end
-
-    def logger
-      Sidekiq.logger
-    end
   end
 end
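The rewritten JobLogger drops the interpolated "done/fail: N sec" messages and instead attaches job metadata (class, jid, bid, tags) and elapsed time through the new Sidekiq::Context, and its prepare hook honors a per-job "log_level" value from the job hash. A minimal sketch of a worker opting into that, assuming the usual behavior that sidekiq_options keys end up in the job payload; the worker class and queue name here are made up for illustration:

```ruby
# Hypothetical worker: the "log_level" option flows into the job hash, so
# JobLogger#prepare wraps this job's execution in logger.log_at(:debug)
# while other jobs keep the process-wide log level.
class AuditWorker
  include Sidekiq::Worker
  sidekiq_options queue: "default", log_level: :debug

  def perform(record_id)
    logger.debug { "auditing record #{record_id}" } # emitted only for this job class
  end
end
```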
data/lib/sidekiq/job_retry.rb
@@ -1,6 +1,10 @@
 # frozen_string_literal: true
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
+
+require "sidekiq/scheduled"
+require "sidekiq/api"
+
+require "zlib"
+require "base64"
 
 module Sidekiq
   ##
@@ -56,7 +60,8 @@ module Sidekiq
   #   end
   #
   class JobRetry
-    class Skip < ::RuntimeError; end
+    class Handled < ::RuntimeError; end
+    class Skip < Handled; end
 
     include Sidekiq::Util
 
@@ -71,7 +76,7 @@ module Sidekiq
     # require the worker to be instantiated.
     def global(msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -80,22 +85,19 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry']
+      if msg["retry"]
         attempt_retry(nil, msg, queue, e)
       else
         Sidekiq.death_handlers.each do |handler|
-          begin
-            handler.call(msg, e)
-          rescue => handler_ex
-            handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
-          end
+          handler.call(msg, e)
+        rescue => handler_ex
+          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
         end
       end
 
-      raise e
+      raise Handled
     end
 
-
     # The local retry support means that any errors that occur within
     # this block can be associated with the given worker instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
@@ -106,7 +108,7 @@ module Sidekiq
     # calling the handle_exception handlers.
     def local(worker, msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -115,11 +117,11 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry'] == nil
-        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      if msg["retry"].nil?
+        msg["retry"] = worker.class.get_sidekiq_options["retry"]
       end
 
-      raise e unless msg['retry']
+      raise e unless msg["retry"]
       attempt_retry(worker, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
@@ -132,48 +134,44 @@ module Sidekiq
     # instantiate the worker instance. All access must be guarded and
     # best effort.
     def attempt_retry(worker, msg, queue, exception)
-      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
-      msg['queue'] = if msg['retry_queue']
-        msg['retry_queue']
-      else
-        queue
-      end
+      msg["queue"] = (msg["retry_queue"] || queue)
 
-      # App code can stuff all sorts of crazy binary data into the error message
-      # that won't convert to JSON.
-      m = exception.message.to_s[0, 10_000]
+      m = exception_message(exception)
       if m.respond_to?(:scrub!)
         m.force_encoding("utf-8")
         m.scrub!
       end
 
-      msg['error_message'] = m
-      msg['error_class'] = exception.class.name
-      count = if msg['retry_count']
-        msg['retried_at'] = Time.now.to_f
-        msg['retry_count'] += 1
+      msg["error_message"] = m
+      msg["error_class"] = exception.class.name
+      count = if msg["retry_count"]
+        msg["retried_at"] = Time.now.to_f
+        msg["retry_count"] += 1
       else
-        msg['failed_at'] = Time.now.to_f
-        msg['retry_count'] = 0
+        msg["failed_at"] = Time.now.to_f
+        msg["retry_count"] = 0
      end
 
-      if msg['backtrace'] == true
-        msg['error_backtrace'] = exception.backtrace
-      elsif !msg['backtrace']
-        # do nothing
-      elsif msg['backtrace'].to_i != 0
-        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      if msg["backtrace"]
+        lines = if msg["backtrace"] == true
+          exception.backtrace
+        else
+          exception.backtrace[0...msg["backtrace"].to_i]
+        end
+
+        msg["error_backtrace"] = compress_backtrace(lines)
       end
 
       if count < max_retry_attempts
         delay = delay_for(worker, count, exception)
         # Logging here can break retries if the logging device raises ENOSPC #3979
-        #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
         payload = Sidekiq.dump_json(msg)
         Sidekiq.redis do |conn|
-          conn.zadd('retry', retry_at.to_s, payload)
+          conn.zadd("retry", retry_at.to_s, payload)
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
@@ -183,25 +181,23 @@ module Sidekiq
 
     def retries_exhausted(worker, msg, exception)
       begin
-        block = worker && worker.sidekiq_retries_exhausted_block
-        block.call(msg, exception) if block
+        block = worker&.sidekiq_retries_exhausted_block
+        block&.call(msg, exception)
       rescue => e
-        handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
       Sidekiq.death_handlers.each do |handler|
-        begin
-          handler.call(msg, exception)
-        rescue => e
-          handle_exception(e, { context: "Error calling death handler", job: msg })
-        end
+        handler.call(msg, exception)
+      rescue => e
+        handle_exception(e, {context: "Error calling death handler", job: msg})
      end
 
-      send_to_morgue(msg) unless msg['dead'] == false
+      send_to_morgue(msg) unless msg["dead"] == false
     end
 
     def send_to_morgue(msg)
-      logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
       DeadSet.new.kill(payload, notify_failure: false)
     end
@@ -215,7 +211,7 @@ module Sidekiq
     end
 
     def delay_for(worker, count, exception)
-      if worker && worker.sidekiq_retry_in_block
+      if worker&.sidekiq_retry_in_block
        custom_retry_in = retry_in(worker, count, exception).to_i
        return custom_retry_in if custom_retry_in > 0
      end
@@ -224,16 +220,14 @@ module Sidekiq
 
     # delayed_job uses the same basic formula
     def seconds_to_delay(count)
-      (count ** 4) + 15 + (rand(30)*(count+1))
+      (count**4) + 15 + (rand(30) * (count + 1))
     end
 
     def retry_in(worker, count, exception)
-      begin
-        worker.sidekiq_retry_in_block.call(count, exception)
-      rescue Exception => e
-        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-        nil
-      end
+      worker.sidekiq_retry_in_block.call(count, exception)
+    rescue Exception => e
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      nil
     end
 
     def exception_caused_by_shutdown?(e, checked_causes = [])
@@ -247,5 +241,20 @@ module Sidekiq
       exception_caused_by_shutdown?(e.cause, checked_causes)
     end
 
+    # Extract message from exception.
+    # Set a default if the message raises an error
+    def exception_message(exception)
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      exception.message.to_s[0, 10_000]
+    rescue
+      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
+    end
+
+    def compress_backtrace(backtrace)
+      serialized = Marshal.dump(backtrace)
+      compressed = Zlib::Deflate.deflate(serialized)
+      Base64.encode64(compressed)
+    end
   end
 end
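The retry path now stores "error_backtrace" as a Marshal-dumped, Zlib-deflated, Base64-encoded string instead of a plain array of frames, which is why "zlib" and "base64" are required at the top of the file. A small round-trip sketch of that encoding, with a made-up backtrace and a decompress helper that simply inverts compress_backtrace:

```ruby
require "zlib"
require "base64"

# Inverse of JobRetry#compress_backtrace: Base64-decode, inflate, then
# Marshal.load to recover the original array of backtrace frames.
def decompress_backtrace(encoded)
  Marshal.load(Zlib::Inflate.inflate(Base64.decode64(encoded)))
end

frames = ["app/workers/hard_worker.rb:12:in `perform'", "lib/runner.rb:4:in `call'"]
encoded = Base64.encode64(Zlib::Deflate.deflate(Marshal.dump(frames))) # what the retry payload now stores
decompress_backtrace(encoded) == frames # => true
```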
data/lib/sidekiq/launcher.rb
@@ -1,19 +1,25 @@
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/fetch"
+require "sidekiq/scheduled"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
   class Launcher
     include Util
 
-    attr_accessor :manager, :poller, :fetcher
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
-    STATS_TTL = 5*365*24*60*60
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "stopping" if me.stopping? },
+    ]
+
+    attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
       @manager = Sidekiq::Manager.new(options)
@@ -62,10 +68,30 @@ module Sidekiq
 
     private unless $TESTING
 
+    def start_heartbeat
+      loop do
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem("processes", identity)
+          conn.del("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
     def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
 
       ❤
     end
@@ -73,6 +99,7 @@ module Sidekiq
     def ❤
       key = identity
       fails = procd = 0
+
       begin
         fails = Processor::FAILURE.reset
         procd = Processor::PROCESSED.reset
@@ -80,6 +107,7 @@ module Sidekiq
 
         workers_key = "#{key}:workers"
         nowdate = Time.now.utc.strftime("%Y-%m-%d")
+
         Sidekiq.redis do |conn|
           conn.multi do
             conn.incrby("stat:processed", procd)
@@ -97,24 +125,25 @@ module Sidekiq
             conn.expire(workers_key, 60)
           end
         end
+
         fails = procd = 0
 
-        _, exists, _, _, msg = Sidekiq.redis do |conn|
-          conn.multi do
-            conn.sadd('processes', key)
+        _, exists, _, _, msg = Sidekiq.redis { |conn|
+          conn.multi {
+            conn.sadd("processes", key)
             conn.exists(key)
-            conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
+            conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
             conn.expire(key, 60)
             conn.rpop("#{key}-signals")
-          end
-        end
+          }
+        }
 
         # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-        fire_event(:heartbeat) if !exists
+        fire_event(:heartbeat) unless exists
 
         return unless msg
 
-        ::Process.kill(msg, $$)
+        ::Process.kill(msg, ::Process.pid)
       rescue => e
         # ignore all redis/network issues
         logger.error("heartbeat: #{e.message}")
@@ -124,25 +153,17 @@ module Sidekiq
       end
     end
 
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
-      end
-      Sidekiq.logger.info("Heartbeat stopping...")
-    end
-
     def to_data
       @data ||= begin
         {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
+          "hostname" => hostname,
+          "started_at" => Time.now.to_f,
+          "pid" => ::Process.pid,
+          "tag" => @options[:tag] || "",
+          "concurrency" => @options[:concurrency],
+          "queues" => @options[:queues].uniq,
+          "labels" => @options[:labels],
+          "identity" => identity,
         }
       end
     end
@@ -154,20 +175,5 @@ module Sidekiq
         Sidekiq.dump_json(to_data)
       end
     end
-
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
-    end
-
   end
 end
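The proctitle procs move from Sidekiq::CLI onto the Launcher itself, and the heartbeat rebuilds $0 each cycle by calling every proc with the launcher and its to_data hash, dropping nils, and joining with spaces. A standalone sketch of that assembly step, using a fabricated launcher stand-in and data hash rather than a live process:

```ruby
# FakeLauncher and the data hash below are stand-ins; in Sidekiq the real
# launcher passes itself and to_data to each proc in PROCTITLES.
FakeLauncher = Struct.new(:stopping) do
  def stopping?
    stopping
  end
end

PROCTITLES = [
  proc { "sidekiq" },
  proc { "6.0.1" },
  proc { |me, data| data["tag"] },
  proc { |me, data| "[3 of #{data["concurrency"]} busy]" },
  proc { |me, data| "stopping" if me.stopping? },
]

launcher = FakeLauncher.new(false)
info = {"tag" => "myapp", "concurrency" => 10}

# Same map/compact/join the heartbeat uses to set the process title.
puts PROCTITLES.map { |p| p.call(launcher, info) }.compact.join(" ")
# => "sidekiq 6.0.1 myapp [3 of 10 busy]"
```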