sidekiq 5.2.7 → 6.0.3

Files changed (74)
  1. checksums.yaml +4 -4
  2. data/.circleci/config.yml +21 -0
  3. data/.gitignore +0 -2
  4. data/.standard.yml +20 -0
  5. data/6.0-Upgrade.md +72 -0
  6. data/Changes.md +121 -0
  7. data/Ent-2.0-Upgrade.md +37 -0
  8. data/Ent-Changes.md +18 -0
  9. data/Gemfile +12 -11
  10. data/Gemfile.lock +196 -0
  11. data/Pro-5.0-Upgrade.md +25 -0
  12. data/Pro-Changes.md +18 -1
  13. data/README.md +18 -30
  14. data/Rakefile +5 -4
  15. data/bin/sidekiqload +32 -24
  16. data/bin/sidekiqmon +8 -0
  17. data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
  18. data/lib/generators/sidekiq/worker_generator.rb +20 -12
  19. data/lib/sidekiq.rb +61 -42
  20. data/lib/sidekiq/api.rb +230 -214
  21. data/lib/sidekiq/cli.rb +111 -174
  22. data/lib/sidekiq/client.rb +55 -46
  23. data/lib/sidekiq/delay.rb +5 -6
  24. data/lib/sidekiq/exception_handler.rb +10 -12
  25. data/lib/sidekiq/extensions/action_mailer.rb +10 -20
  26. data/lib/sidekiq/extensions/active_record.rb +9 -7
  27. data/lib/sidekiq/extensions/class_methods.rb +9 -7
  28. data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
  29. data/lib/sidekiq/fetch.rb +11 -12
  30. data/lib/sidekiq/job_logger.rb +45 -7
  31. data/lib/sidekiq/job_retry.rb +60 -60
  32. data/lib/sidekiq/launcher.rb +57 -51
  33. data/lib/sidekiq/logger.rb +165 -0
  34. data/lib/sidekiq/manager.rb +7 -9
  35. data/lib/sidekiq/middleware/chain.rb +14 -4
  36. data/lib/sidekiq/middleware/i18n.rb +5 -7
  37. data/lib/sidekiq/monitor.rb +133 -0
  38. data/lib/sidekiq/paginator.rb +18 -14
  39. data/lib/sidekiq/processor.rb +67 -66
  40. data/lib/sidekiq/rails.rb +23 -29
  41. data/lib/sidekiq/redis_connection.rb +31 -37
  42. data/lib/sidekiq/scheduled.rb +28 -29
  43. data/lib/sidekiq/testing.rb +34 -23
  44. data/lib/sidekiq/testing/inline.rb +2 -1
  45. data/lib/sidekiq/util.rb +17 -16
  46. data/lib/sidekiq/version.rb +2 -1
  47. data/lib/sidekiq/web.rb +41 -49
  48. data/lib/sidekiq/web/action.rb +14 -10
  49. data/lib/sidekiq/web/application.rb +64 -66
  50. data/lib/sidekiq/web/helpers.rb +83 -72
  51. data/lib/sidekiq/web/router.rb +17 -14
  52. data/lib/sidekiq/worker.rb +124 -97
  53. data/sidekiq.gemspec +16 -16
  54. data/web/assets/javascripts/dashboard.js +4 -23
  55. data/web/assets/stylesheets/application-dark.css +125 -0
  56. data/web/assets/stylesheets/application.css +9 -0
  57. data/web/locales/de.yml +14 -2
  58. data/web/locales/ja.yml +2 -1
  59. data/web/views/_job_info.erb +2 -1
  60. data/web/views/busy.erb +4 -1
  61. data/web/views/dead.erb +2 -2
  62. data/web/views/layout.erb +1 -0
  63. data/web/views/morgue.erb +4 -1
  64. data/web/views/queue.erb +10 -1
  65. data/web/views/retries.erb +4 -1
  66. data/web/views/retry.erb +2 -2
  67. data/web/views/scheduled.erb +4 -1
  68. metadata +20 -29
  69. data/.travis.yml +0 -11
  70. data/bin/sidekiqctl +0 -20
  71. data/lib/sidekiq/core_ext.rb +0 -1
  72. data/lib/sidekiq/ctl.rb +0 -221
  73. data/lib/sidekiq/logging.rb +0 -122
  74. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
data/lib/sidekiq/job_logger.rb

@@ -1,25 +1,63 @@
 # frozen_string_literal: true
+
 module Sidekiq
   class JobLogger
+    def initialize(logger = Sidekiq.logger)
+      @logger = logger
+    end
 
     def call(item, queue)
       start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-      logger.info("start")
+      @logger.info("start")
+
       yield
-      logger.info("done: #{elapsed(start)} sec")
+
+      with_elapsed_time_context(start) do
+        @logger.info("done")
+      end
     rescue Exception
-      logger.info("fail: #{elapsed(start)} sec")
+      with_elapsed_time_context(start) do
+        @logger.info("fail")
+      end
+
       raise
     end
 
+    def prepare(job_hash, &block)
+      level = job_hash["log_level"]
+      if level
+        @logger.log_at(level) do
+          Sidekiq::Context.with(job_hash_context(job_hash), &block)
+        end
+      else
+        Sidekiq::Context.with(job_hash_context(job_hash), &block)
+      end
+    end
+
+    def job_hash_context(job_hash)
+      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+      # attribute to expose the underlying thing.
+      h = {
+        class: job_hash["wrapped"] || job_hash["class"],
+        jid: job_hash["jid"],
+      }
+      h[:bid] = job_hash["bid"] if job_hash["bid"]
+      h[:tags] = job_hash["tags"] if job_hash["tags"]
+      h
+    end
+
+    def with_elapsed_time_context(start, &block)
+      Sidekiq::Context.with(elapsed_time_context(start), &block)
+    end
+
+    def elapsed_time_context(start)
+      {elapsed: elapsed(start).to_s}
+    end
+
     private
 
     def elapsed(start)
       (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
     end
-
-    def logger
-      Sidekiq.logger
-    end
   end
 end
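
The practical effect of this rewrite: timing and job metadata now travel as structured logging context rather than being interpolated into the message text. A minimal sketch of how the pieces fit together, assuming the Sidekiq::Context API this release adds in lib/sidekiq/logger.rb (a thread-local hash that the new log formatters append to every line):

    require "sidekiq"

    job_logger = Sidekiq::JobLogger.new(Sidekiq.logger)
    job = {"class" => "HardWorker", "jid" => "b4a577edbccf1d80"} # illustrative payload

    # prepare pushes class=/jid= (plus bid=/tags= when present) into the
    # logging context for the duration of the block; call then logs "start"
    # and a "done"/"fail" line whose context also carries elapsed=.
    job_logger.prepare(job) do
      job_logger.call(job, "default") do
        # ... perform the job ...
      end
    end
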
data/lib/sidekiq/job_retry.rb

@@ -1,6 +1,10 @@
 # frozen_string_literal: true
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
+
+require "sidekiq/scheduled"
+require "sidekiq/api"
+
+require "zlib"
+require "base64"
 
 module Sidekiq
   ##
@@ -70,7 +74,7 @@ module Sidekiq
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
     # require the worker to be instantiated.
-    def global(msg, queue)
+    def global(jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -81,22 +85,20 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry']
+      msg = Sidekiq.load_json(jobstr)
+      if msg["retry"]
         attempt_retry(nil, msg, queue, e)
       else
         Sidekiq.death_handlers.each do |handler|
-          begin
-            handler.call(msg, e)
-          rescue => handler_ex
-            handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
-          end
+          handler.call(msg, e)
+        rescue => handler_ex
+          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
         end
       end
 
       raise Handled
     end
 
-
     # The local retry support means that any errors that occur within
     # this block can be associated with the given worker instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
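
Two things changed here: global now receives the raw job JSON (jobstr) and only pays the parse cost once an error actually occurs, and the death handlers run with an inline rescue rather than a nested begin/end (behavior unchanged). For reference, a death handler is registered like this (a public API in Sidekiq 5.x and 6.x; each handler receives the parsed job hash and the exception):

    Sidekiq.configure_server do |config|
      config.death_handlers << ->(job, ex) do
        puts "#{job["class"]} #{job["jid"]} died: #{ex.message}"
      end
    end
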
@@ -105,7 +107,7 @@ module Sidekiq
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(worker, msg, queue)
+    def local(worker, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -116,11 +118,12 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry'] == nil
-        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      msg = Sidekiq.load_json(jobstr)
+      if msg["retry"].nil?
+        msg["retry"] = worker.class.get_sidekiq_options["retry"]
       end
 
-      raise e unless msg['retry']
+      raise e unless msg["retry"]
       attempt_retry(worker, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
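
The "retry" key that local backfills here comes straight from the worker's declared options; the knob itself is unchanged in 6.0:

    class HardWorker
      include Sidekiq::Worker
      # retry: false disables retries for this worker; an integer caps the
      # number of retry attempts (the global default is 25).
      sidekiq_options retry: 5
    end
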
@@ -133,13 +136,9 @@ module Sidekiq
     # instantiate the worker instance. All access must be guarded and
     # best effort.
     def attempt_retry(worker, msg, queue, exception)
-      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
-      msg['queue'] = if msg['retry_queue']
-        msg['retry_queue']
-      else
-        queue
-      end
+      msg["queue"] = (msg["retry_queue"] || queue)
 
       m = exception_message(exception)
       if m.respond_to?(:scrub!)
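
The (msg["retry_queue"] || queue) simplification preserves the existing retry_queue option, which routes retries to a different queue than the one the job first ran on:

    class HardWorker
      include Sidekiq::Worker
      # first execution on :critical, any retries drain through :low
      sidekiq_options queue: :critical, retry_queue: :low
    end
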
@@ -147,32 +146,34 @@ module Sidekiq
         m.scrub!
       end
 
-      msg['error_message'] = m
-      msg['error_class'] = exception.class.name
-      count = if msg['retry_count']
-        msg['retried_at'] = Time.now.to_f
-        msg['retry_count'] += 1
+      msg["error_message"] = m
+      msg["error_class"] = exception.class.name
+      count = if msg["retry_count"]
+        msg["retried_at"] = Time.now.to_f
+        msg["retry_count"] += 1
       else
-        msg['failed_at'] = Time.now.to_f
-        msg['retry_count'] = 0
+        msg["failed_at"] = Time.now.to_f
+        msg["retry_count"] = 0
       end
 
-      if msg['backtrace'] == true
-        msg['error_backtrace'] = exception.backtrace
-      elsif !msg['backtrace']
-        # do nothing
-      elsif msg['backtrace'].to_i != 0
-        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      if msg["backtrace"]
+        lines = if msg["backtrace"] == true
+          exception.backtrace
+        else
+          exception.backtrace[0...msg["backtrace"].to_i]
+        end
+
+        msg["error_backtrace"] = compress_backtrace(lines)
       end
 
       if count < max_retry_attempts
         delay = delay_for(worker, count, exception)
         # Logging here can break retries if the logging device raises ENOSPC #3979
-        #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
         payload = Sidekiq.dump_json(msg)
         Sidekiq.redis do |conn|
-          conn.zadd('retry', retry_at.to_s, payload)
+          conn.zadd("retry", retry_at.to_s, payload)
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
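
The accepted values for the backtrace option are unchanged: true stores the full trace, an integer stores that many leading lines, and false/nil stores nothing. What changed is the storage format, which is now deflate-compressed and Base64-encoded by compress_backtrace (defined at the end of this file) instead of being a raw JSON array of strings:

    class HardWorker
      include Sidekiq::Worker
      sidekiq_options backtrace: 20 # keep the first 20 backtrace lines on failure
    end
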
@@ -182,25 +183,23 @@ module Sidekiq
 
     def retries_exhausted(worker, msg, exception)
       begin
-        block = worker && worker.sidekiq_retries_exhausted_block
-        block.call(msg, exception) if block
+        block = worker&.sidekiq_retries_exhausted_block
+        block&.call(msg, exception)
       rescue => e
-        handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
       Sidekiq.death_handlers.each do |handler|
-        begin
-          handler.call(msg, exception)
-        rescue => e
-          handle_exception(e, { context: "Error calling death handler", job: msg })
-        end
+        handler.call(msg, exception)
+      rescue => e
+        handle_exception(e, {context: "Error calling death handler", job: msg})
       end
 
-      send_to_morgue(msg) unless msg['dead'] == false
+      send_to_morgue(msg) unless msg["dead"] == false
     end
 
     def send_to_morgue(msg)
-      logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
       DeadSet.new.kill(payload, notify_failure: false)
     end
@@ -214,7 +213,7 @@ module Sidekiq
     end
 
     def delay_for(worker, count, exception)
-      if worker && worker.sidekiq_retry_in_block
+      if worker&.sidekiq_retry_in_block
       custom_retry_in = retry_in(worker, count, exception).to_i
       return custom_retry_in if custom_retry_in > 0
     end
@@ -223,16 +222,14 @@ module Sidekiq
 
     # delayed_job uses the same basic formula
     def seconds_to_delay(count)
-      (count ** 4) + 15 + (rand(30)*(count+1))
+      (count**4) + 15 + (rand(30) * (count + 1))
     end
 
     def retry_in(worker, count, exception)
-      begin
-        worker.sidekiq_retry_in_block.call(count, exception)
-      rescue Exception => e
-        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-        nil
-      end
+      worker.sidekiq_retry_in_block.call(count, exception)
+    rescue Exception => e
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      nil
     end
 
     def exception_caused_by_shutdown?(e, checked_causes = [])
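
For a sense of scale, the default schedule of (count**4) + 15 seconds, plus up to 30 * (count + 1) seconds of random jitter, works out roughly as below; with the default 25 attempts the full run stretches over about three weeks:

    # count  0 ->      15s (+ jitter)
    # count  5 ->     640s (~11 minutes)
    # count 10 ->  10_015s (~2.8 hours)
    # count 24 -> 331_791s (~3.8 days)
    (0..24).map { |count| (count**4) + 15 }
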
@@ -249,14 +246,17 @@ module Sidekiq
     # Extract message from exception.
     # Set a default if the message raises an error
     def exception_message(exception)
-      begin
-        # App code can stuff all sorts of crazy binary data into the error message
-        # that won't convert to JSON.
-        exception.message.to_s[0, 10_000]
-      rescue
-        "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
-      end
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      exception.message.to_s[0, 10_000]
+    rescue
+      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
     end
 
+    def compress_backtrace(backtrace)
+      serialized = Sidekiq.dump_json(backtrace)
+      compressed = Zlib::Deflate.deflate(serialized)
+      Base64.encode64(compressed)
+    end
   end
 end
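
compress_backtrace round-trips with nothing but the standard library. A sketch of the inverse, for anyone reading error_backtrace off a retry or dead job payload by hand (the API classes in data/lib/sidekiq/api.rb, also updated in this release, decode it the same way):

    require "base64"
    require "zlib"

    def decompress_backtrace(encoded)
      Sidekiq.load_json(Zlib::Inflate.inflate(Base64.decode64(encoded)))
    end
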
data/lib/sidekiq/launcher.rb

@@ -1,19 +1,25 @@
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/fetch"
+require "sidekiq/scheduled"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
   class Launcher
     include Util
 
-    attr_accessor :manager, :poller, :fetcher
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
-    STATS_TTL = 5*365*24*60*60
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "stopping" if me.stopping? },
+    ]
+
+    attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
       @manager = Sidekiq::Manager.new(options)
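
PROCTITLES moves from Sidekiq::CLI onto the Launcher, so the heartbeat no longer reaches into the CLI class (which embedded processes may never load). With this table, a running process titles itself along these lines (tag and counts illustrative):

    $ ps -o pid,command | grep sidekiq
    12345 sidekiq 6.0.3 myapp [4 of 10 busy]
    12345 sidekiq 6.0.3 myapp [0 of 10 busy] stopping   # after quiet (TSTP)
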
@@ -62,10 +68,30 @@ module Sidekiq
 
     private unless $TESTING
 
+    def start_heartbeat
+      loop do
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem("processes", identity)
+          conn.del("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
     def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
 
       ❤
     end
@@ -73,6 +99,7 @@ module Sidekiq
     def ❤
       key = identity
       fails = procd = 0
+
       begin
         fails = Processor::FAILURE.reset
         procd = Processor::PROCESSED.reset
@@ -80,6 +107,7 @@ module Sidekiq
 
         workers_key = "#{key}:workers"
         nowdate = Time.now.utc.strftime("%Y-%m-%d")
+
         Sidekiq.redis do |conn|
           conn.multi do
             conn.incrby("stat:processed", procd)
@@ -97,24 +125,25 @@ module Sidekiq
             conn.expire(workers_key, 60)
           end
         end
+
         fails = procd = 0
 
-        _, exists, _, _, msg = Sidekiq.redis do |conn|
-          conn.multi do
-            conn.sadd('processes', key)
+        _, exists, _, _, msg = Sidekiq.redis { |conn|
+          conn.multi {
+            conn.sadd("processes", key)
             conn.exists(key)
-            conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
+            conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
             conn.expire(key, 60)
             conn.rpop("#{key}-signals")
-          end
-        end
+          }
+        }
 
         # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-        fire_event(:heartbeat) if !exists
+        fire_event(:heartbeat) unless exists
 
         return unless msg
 
-        ::Process.kill(msg, $$)
+        ::Process.kill(msg, ::Process.pid)
       rescue => e
         # ignore all redis/network issues
         logger.error("heartbeat: #{e.message}")
@@ -124,25 +153,17 @@ module Sidekiq
       end
     end
 
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
-      end
-      Sidekiq.logger.info("Heartbeat stopping...")
-    end
-
     def to_data
       @data ||= begin
         {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
+          "hostname" => hostname,
+          "started_at" => Time.now.to_f,
+          "pid" => ::Process.pid,
+          "tag" => @options[:tag] || "",
+          "concurrency" => @options[:concurrency],
+          "queues" => @options[:queues].uniq,
+          "labels" => @options[:labels],
+          "identity" => identity,
        }
      end
    end
@@ -154,20 +175,5 @@ module Sidekiq
         Sidekiq.dump_json(to_data)
       end
     end
-
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
-    end
-
   end
 end
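
The hash that ❤ writes under each process key is what the public API reads back; for example, Sidekiq::ProcessSet (from sidekiq/api) iterates these heartbeat records:

    require "sidekiq/api"

    Sidekiq::ProcessSet.new.each do |process|
      puts "#{process["identity"]}: #{process["busy"]} busy, " \
           "last beat at #{Time.at(process["beat"])}"
    end
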