sidekiq 5.2.1 → 6.0.0

Files changed (70)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +61 -0
  3. data/.gitignore +1 -1
  4. data/.standard.yml +20 -0
  5. data/6.0-Upgrade.md +70 -0
  6. data/COMM-LICENSE +11 -9
  7. data/Changes.md +79 -0
  8. data/Ent-2.0-Upgrade.md +37 -0
  9. data/Ent-Changes.md +30 -1
  10. data/Gemfile +19 -9
  11. data/Gemfile.lock +196 -0
  12. data/Pro-5.0-Upgrade.md +25 -0
  13. data/Pro-Changes.md +29 -0
  14. data/README.md +17 -31
  15. data/Rakefile +6 -4
  16. data/bin/sidekiqload +27 -23
  17. data/bin/sidekiqmon +9 -0
  18. data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
  19. data/lib/generators/sidekiq/worker_generator.rb +12 -14
  20. data/lib/sidekiq.rb +56 -43
  21. data/lib/sidekiq/api.rb +141 -148
  22. data/lib/sidekiq/cli.rb +142 -207
  23. data/lib/sidekiq/client.rb +45 -46
  24. data/lib/sidekiq/delay.rb +5 -6
  25. data/lib/sidekiq/exception_handler.rb +10 -12
  26. data/lib/sidekiq/extensions/action_mailer.rb +10 -20
  27. data/lib/sidekiq/extensions/active_record.rb +9 -7
  28. data/lib/sidekiq/extensions/class_methods.rb +9 -7
  29. data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
  30. data/lib/sidekiq/fetch.rb +5 -6
  31. data/lib/sidekiq/job_logger.rb +39 -9
  32. data/lib/sidekiq/job_retry.rb +62 -54
  33. data/lib/sidekiq/launcher.rb +60 -52
  34. data/lib/sidekiq/logger.rb +69 -0
  35. data/lib/sidekiq/manager.rb +10 -12
  36. data/lib/sidekiq/middleware/chain.rb +3 -2
  37. data/lib/sidekiq/middleware/i18n.rb +5 -7
  38. data/lib/sidekiq/monitor.rb +148 -0
  39. data/lib/sidekiq/paginator.rb +11 -12
  40. data/lib/sidekiq/processor.rb +98 -62
  41. data/lib/sidekiq/rails.rb +24 -29
  42. data/lib/sidekiq/redis_connection.rb +34 -21
  43. data/lib/sidekiq/scheduled.rb +17 -19
  44. data/lib/sidekiq/testing.rb +22 -23
  45. data/lib/sidekiq/testing/inline.rb +2 -1
  46. data/lib/sidekiq/util.rb +17 -14
  47. data/lib/sidekiq/version.rb +2 -1
  48. data/lib/sidekiq/web.rb +41 -49
  49. data/lib/sidekiq/web/action.rb +14 -10
  50. data/lib/sidekiq/web/application.rb +67 -58
  51. data/lib/sidekiq/web/helpers.rb +72 -66
  52. data/lib/sidekiq/web/router.rb +17 -14
  53. data/lib/sidekiq/worker.rb +134 -91
  54. data/sidekiq.gemspec +16 -18
  55. data/web/assets/javascripts/dashboard.js +14 -23
  56. data/web/assets/stylesheets/application.css +35 -2
  57. data/web/assets/stylesheets/bootstrap.css +1 -1
  58. data/web/locales/ar.yml +1 -0
  59. data/web/locales/en.yml +1 -0
  60. data/web/locales/ja.yml +2 -1
  61. data/web/views/_nav.erb +3 -17
  62. data/web/views/queue.erb +1 -0
  63. data/web/views/queues.erb +1 -1
  64. data/web/views/retries.erb +4 -0
  65. metadata +31 -26
  66. data/.travis.yml +0 -14
  67. data/bin/sidekiqctl +0 -99
  68. data/lib/sidekiq/core_ext.rb +0 -1
  69. data/lib/sidekiq/logging.rb +0 -122
  70. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
data/lib/sidekiq/job_logger.rb

@@ -1,25 +1,55 @@
 # frozen_string_literal: true
+
 module Sidekiq
   class JobLogger
+    def initialize(logger = Sidekiq.logger)
+      @logger = logger
+    end
 
     def call(item, queue)
-      start = Time.now
-      logger.info("start")
+      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      @logger.info("start")
+
       yield
-      logger.info("done: #{elapsed(start)} sec")
+
+      with_elapsed_time_context(start) do
+        @logger.info("done")
+      end
     rescue Exception
-      logger.info("fail: #{elapsed(start)} sec")
+      with_elapsed_time_context(start) do
+        @logger.info("fail")
+      end
+
       raise
     end
 
-    private
+    def with_job_hash_context(job_hash, &block)
+      @logger.with_context(job_hash_context(job_hash), &block)
+    end
 
-    def elapsed(start)
-      (Time.now - start).round(3)
+    def job_hash_context(job_hash)
+      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+      # attribute to expose the underlying thing.
+      h = {
+        class: job_hash["wrapped"] || job_hash["class"],
+        jid: job_hash["jid"],
+      }
+      h[:bid] = job_hash["bid"] if job_hash["bid"]
+      h
     end
 
-    def logger
-      Sidekiq.logger
+    def with_elapsed_time_context(start, &block)
+      @logger.with_context(elapsed_time_context(start), &block)
+    end
+
+    def elapsed_time_context(start)
+      {elapsed: elapsed(start).to_s}
+    end
+
+    private
+
+    def elapsed(start)
+      (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
    end
  end
 end
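JobLogger now owns its logger instance and tags each line through the `with_context` API on the new Sidekiq::Logger (data/lib/sidekiq/logger.rb, added in this release), instead of interpolating jid/elapsed into the message text. A minimal sketch of that thread-local context mechanism; the `ContextLogger` class and formatter below are illustrative stand-ins, not Sidekiq's actual implementation:

```ruby
require "logger"

# Sketch of thread-local logging context in the style of
# Sidekiq::Logger#with_context.
class ContextLogger < ::Logger
  def ctx
    Thread.current[:ctx] ||= {}
  end

  # Merge a hash into this thread's context for the duration of the
  # block, then remove exactly those keys again.
  def with_context(hash)
    ctx.merge!(hash)
    yield
  ensure
    hash.each_key { |k| ctx.delete(k) }
  end
end

logger = ContextLogger.new($stdout)
logger.formatter = proc { |sev, _time, _prog, msg|
  "#{sev} #{logger.ctx.map { |k, v| "#{k}=#{v}" }.join(" ")}: #{msg}\n"
}

logger.with_context(class: "HardWorker", jid: "abc123") do
  logger.info("start") # => INFO class=HardWorker jid=abc123: start
end
```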
data/lib/sidekiq/job_retry.rb

@@ -1,6 +1,7 @@
 # frozen_string_literal: true
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
+
+require "sidekiq/scheduled"
+require "sidekiq/api"
 
 module Sidekiq
   ##
@@ -56,7 +57,8 @@ module Sidekiq
   #     end
   #
   class JobRetry
-    class Skip < ::RuntimeError; end
+    class Handled < ::RuntimeError; end
+    class Skip < Handled; end
 
     include Sidekiq::Util
 
@@ -71,7 +73,7 @@ module Sidekiq
     # require the worker to be instantiated.
     def global(msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -80,11 +82,18 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      raise e unless msg['retry']
-      attempt_retry(nil, msg, queue, e)
-      raise e
-    end
+      if msg["retry"]
+        attempt_retry(nil, msg, queue, e)
+      else
+        Sidekiq.death_handlers.each do |handler|
+          handler.call(msg, e)
+        rescue => handler_ex
+          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
+        end
+      end
 
+      raise Handled
+    end
 
     # The local retry support means that any errors that occur within
     # this block can be associated with the given worker instance.
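One behavioral change hides in this hunk: a job configured with `retry: false` now runs the registered death handlers instead of being re-raised untouched, and the processor sees the new `Handled` marker rather than the original error. Death handlers are a public hook; a minimal registration, with an illustrative handler body:

```ruby
# config/initializers/sidekiq.rb (or equivalent boot code)
Sidekiq.configure_server do |config|
  config.death_handlers << ->(job, ex) do
    # job is the job hash, ex is the exception that finally killed it
    Sidekiq.logger.warn("#{job["class"]} #{job["jid"]} died: #{ex.message}")
  end
end
```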
@@ -96,7 +105,7 @@ module Sidekiq
     # calling the handle_exception handlers.
     def local(worker, msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -105,11 +114,11 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry'] == nil
-        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      if msg["retry"].nil?
+        msg["retry"] = worker.class.get_sidekiq_options["retry"]
       end
 
-      raise e unless msg['retry']
+      raise e unless msg["retry"]
       attempt_retry(worker, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
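The `msg["retry"]` value consulted here comes from the standard per-worker option, e.g.:

```ruby
class HardWorker
  include Sidekiq::Worker
  # retry: 5           - up to five retries, then retries_exhausted/death
  # retry: false       - never retry; failures go to the death handlers
  # retry_queue: "low" - push retries onto a different queue (see the
  #                      retry_queue handling in attempt_retry below)
  sidekiq_options retry: 5, retry_queue: "low"
end
```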
@@ -122,47 +131,42 @@ module Sidekiq
     # instantiate the worker instance. All access must be guarded and
     # best effort.
     def attempt_retry(worker, msg, queue, exception)
-      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
-      msg['queue'] = if msg['retry_queue']
-        msg['retry_queue']
-      else
-        queue
-      end
+      msg["queue"] = (msg["retry_queue"] || queue)
 
-      # App code can stuff all sorts of crazy binary data into the error message
-      # that won't convert to JSON.
-      m = exception.message.to_s[0, 10_000]
+      m = exception_message(exception)
       if m.respond_to?(:scrub!)
         m.force_encoding("utf-8")
         m.scrub!
       end
 
-      msg['error_message'] = m
-      msg['error_class'] = exception.class.name
-      count = if msg['retry_count']
-        msg['retried_at'] = Time.now.to_f
-        msg['retry_count'] += 1
+      msg["error_message"] = m
+      msg["error_class"] = exception.class.name
+      count = if msg["retry_count"]
+        msg["retried_at"] = Time.now.to_f
+        msg["retry_count"] += 1
       else
-        msg['failed_at'] = Time.now.to_f
-        msg['retry_count'] = 0
+        msg["failed_at"] = Time.now.to_f
+        msg["retry_count"] = 0
       end
 
-      if msg['backtrace'] == true
-        msg['error_backtrace'] = exception.backtrace
-      elsif !msg['backtrace']
+      if msg["backtrace"] == true
+        msg["error_backtrace"] = exception.backtrace
+      elsif !msg["backtrace"]
         # do nothing
-      elsif msg['backtrace'].to_i != 0
-        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      elsif msg["backtrace"].to_i != 0
+        msg["error_backtrace"] = exception.backtrace[0...msg["backtrace"].to_i]
       end
 
       if count < max_retry_attempts
         delay = delay_for(worker, count, exception)
-        logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # Logging here can break retries if the logging device raises ENOSPC #3979
+        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
         payload = Sidekiq.dump_json(msg)
         Sidekiq.redis do |conn|
-          conn.zadd('retry', retry_at.to_s, payload)
+          conn.zadd("retry", retry_at.to_s, payload)
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
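The `backtrace` handling above is likewise driven by `sidekiq_options`; for example:

```ruby
class ImportWorker
  include Sidekiq::Worker
  # Store the first 20 backtrace lines in the retry payload so they are
  # visible in the Web UI. `backtrace: true` stores the full backtrace
  # (which can bloat Redis); the default stores none.
  sidekiq_options backtrace: 20
end
```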
@@ -171,27 +175,24 @@ module Sidekiq
     end
 
     def retries_exhausted(worker, msg, exception)
-      logger.debug { "Retries exhausted for job" }
       begin
-        block = worker && worker.sidekiq_retries_exhausted_block
-        block.call(msg, exception) if block
+        block = worker&.sidekiq_retries_exhausted_block
+        block&.call(msg, exception)
       rescue => e
-        handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
       Sidekiq.death_handlers.each do |handler|
-        begin
-          handler.call(msg, exception)
-        rescue => e
-          handle_exception(e, { context: "Error calling death handler", job: msg })
-        end
+        handler.call(msg, exception)
+      rescue => e
+        handle_exception(e, {context: "Error calling death handler", job: msg})
       end
 
-      send_to_morgue(msg) unless msg['dead'] == false
+      send_to_morgue(msg) unless msg["dead"] == false
     end
 
     def send_to_morgue(msg)
-      Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
       DeadSet.new.kill(payload, notify_failure: false)
     end
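The `&.` rewrite above doesn't change the hook's contract; workers still register it with the class-level `sidekiq_retries_exhausted`, which runs once before the job is sent to the morgue:

```ruby
class UnreliableWorker
  include Sidekiq::Worker

  sidekiq_retries_exhausted do |msg, ex|
    Sidekiq.logger.warn("Giving up on #{msg["class"]} #{msg["jid"]}: #{ex.message}")
  end

  def perform
    raise "always fails"
  end
end
```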
@@ -205,7 +206,7 @@ module Sidekiq
     end
 
     def delay_for(worker, count, exception)
-      if worker && worker.sidekiq_retry_in_block
+      if worker&.sidekiq_retry_in_block
         custom_retry_in = retry_in(worker, count, exception).to_i
         return custom_retry_in if custom_retry_in > 0
       end
@@ -214,16 +215,14 @@ module Sidekiq
 
     # delayed_job uses the same basic formula
     def seconds_to_delay(count)
-      (count ** 4) + 15 + (rand(30)*(count+1))
+      (count**4) + 15 + (rand(30) * (count + 1))
     end
 
     def retry_in(worker, count, exception)
-      begin
-        worker.sidekiq_retry_in_block.call(count, exception)
-      rescue Exception => e
-        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-        nil
-      end
+      worker.sidekiq_retry_in_block.call(count, exception)
+    rescue Exception => e
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      nil
     end
 
     def exception_caused_by_shutdown?(e, checked_causes = [])
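For reference, the default formula `(count**4) + 15 + (rand(30) * (count + 1))` yields roughly 15-45 seconds at count 0, about 10-14 minutes at count 5, and close to four days at count 24. The restructured `retry_in` still calls the block registered with `sidekiq_retry_in`, which can override the delay per worker; the `RateLimited` error class below is hypothetical:

```ruby
class ThrottledWorker
  include Sidekiq::Worker

  # Return the delay in seconds. If the block raises (see the rescue in
  # retry_in above) or returns nil/0, Sidekiq falls back to the default.
  sidekiq_retry_in do |count, exception|
    case exception
    when RateLimited then 60 * (count + 1) # hypothetical error class
    else 10
    end
  end
end
```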
@@ -237,5 +236,14 @@ module Sidekiq
       exception_caused_by_shutdown?(e.cause, checked_causes)
     end
 
+    # Extract message from exception.
+    # Set a default if the message raises an error
+    def exception_message(exception)
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      exception.message.to_s[0, 10_000]
+    rescue
+      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
+    end
   end
 end
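Note the unary `+` on the fallback string: these files declare `# frozen_string_literal: true`, and `String#+@` returns a mutable copy of a frozen string, so the `force_encoding`/`scrub!` calls in `attempt_retry` can still modify the message in place:

```ruby
# frozen_string_literal: true
s = "!!! ERROR MESSAGE THREW AN ERROR !!!"
s.frozen?    # => true
(+s).frozen? # => false; +@ duplicates a frozen receiver
```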
data/lib/sidekiq/launcher.rb

@@ -1,19 +1,25 @@
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/fetch"
+require "sidekiq/scheduled"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
   class Launcher
     include Util
 
-    attr_accessor :manager, :poller, :fetcher
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
-    STATS_TTL = 5*365*24*60*60
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "stopping" if me.stopping? },
+    ]
+
+    attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
       @manager = Sidekiq::Manager.new(options)
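PROCTITLES moves here from Sidekiq::CLI (the old heartbeat read `Sidekiq::CLI::PROCTITLES`). Each proc contributes one segment of the process title that `heartbeat` assigns to `$0`. An illustrative reconstruction with stand-in values:

```ruby
# Stand-in values: tag "myapp", concurrency 10, 4 busy, not stopping.
data = {"tag" => "myapp", "concurrency" => 10}
busy = 4
stopping = false

segments = [
  proc { |_d| "sidekiq" },
  proc { |_d| "6.0.0" },
  proc { |d| d["tag"] },
  proc { |d| "[#{busy} of #{d["concurrency"]} busy]" },
  proc { |_d| "stopping" if stopping },
]
puts segments.map { |s| s.call(data) }.compact.join(" ")
# => sidekiq 6.0.0 myapp [4 of 10 busy]
```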
@@ -40,7 +46,7 @@ module Sidekiq
     # return until all work is complete and cleaned up.
     # It can take up to the timeout to complete.
     def stop
-      deadline = Time.now + @options[:timeout]
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
 
       @done = true
       @manager.quiet
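As in JobLogger above, `Time.now` arithmetic is replaced with the monotonic clock: wall-clock time can jump backwards or forwards (NTP sync, DST, manual changes), which would corrupt deadlines and elapsed measurements, while `CLOCK_MONOTONIC` only moves forward:

```ruby
# Elapsed-time measurement that is immune to system clock changes.
start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
sleep 0.25
elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
puts format("%.3f sec", elapsed) # => roughly 0.250 sec
```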
@@ -62,10 +68,30 @@ module Sidekiq
 
     private unless $TESTING
 
+    def start_heartbeat
+      loop do
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem("processes", identity)
+          conn.del("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
     def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
 
 
       ❤
@@ -73,6 +99,7 @@ module Sidekiq
     def ❤
       key = identity
       fails = procd = 0
+
       begin
         fails = Processor::FAILURE.reset
         procd = Processor::PROCESSED.reset
@@ -80,6 +107,7 @@ module Sidekiq
 
         workers_key = "#{key}:workers"
         nowdate = Time.now.utc.strftime("%Y-%m-%d")
+
         Sidekiq.redis do |conn|
           conn.multi do
             conn.incrby("stat:processed", procd)
@@ -97,24 +125,27 @@ module Sidekiq
           conn.expire(workers_key, 60)
         end
       end
+
       fails = procd = 0
 
-      _, exists, _, _, msg = Sidekiq.redis do |conn|
-        conn.multi do
-          conn.sadd('processes', key)
+      _, exists, _, _, msg = Sidekiq.redis { |conn|
+        res = conn.multi {
+          conn.sadd("processes", key)
           conn.exists(key)
-          conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
+          conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
           conn.expire(key, 60)
           conn.rpop("#{key}-signals")
-        end
-      end
+        }
+
+        res
+      }
 
       # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-      fire_event(:heartbeat) if !exists
+      fire_event(:heartbeat) unless exists
 
       return unless msg
 
-      ::Process.kill(msg, $$)
+      ::Process.kill(msg, ::Process.pid)
     rescue => e
       # ignore all redis/network issues
       logger.error("heartbeat: #{e.message}")
@@ -124,25 +155,17 @@ module Sidekiq
       end
     end
 
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
-      end
-      Sidekiq.logger.info("Heartbeat stopping...")
-    end
-
     def to_data
       @data ||= begin
         {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
+          "hostname" => hostname,
+          "started_at" => Time.now.to_f,
+          "pid" => ::Process.pid,
+          "tag" => @options[:tag] || "",
+          "concurrency" => @options[:concurrency],
+          "queues" => @options[:queues].uniq,
+          "labels" => @options[:labels],
+          "identity" => identity,
         }
       end
     end
@@ -154,20 +177,5 @@ module Sidekiq
         Sidekiq.dump_json(to_data)
       end
     end
-
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
-    end
-
   end
 end