sidekiq 5.2.3 → 6.0.0
Potentially problematic release: this version of sidekiq might be problematic.
- checksums.yaml +5 -5
- data/.circleci/config.yml +61 -0
- data/.gitignore +1 -1
- data/.standard.yml +20 -0
- data/6.0-Upgrade.md +70 -0
- data/COMM-LICENSE +11 -9
- data/Changes.md +61 -0
- data/Ent-2.0-Upgrade.md +37 -0
- data/Ent-Changes.md +27 -1
- data/Gemfile +19 -9
- data/Gemfile.lock +196 -0
- data/Pro-5.0-Upgrade.md +25 -0
- data/Pro-Changes.md +19 -2
- data/README.md +17 -31
- data/Rakefile +6 -4
- data/bin/sidekiqload +27 -23
- data/bin/sidekiqmon +9 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
- data/lib/generators/sidekiq/worker_generator.rb +12 -14
- data/lib/sidekiq.rb +56 -43
- data/lib/sidekiq/api.rb +138 -151
- data/lib/sidekiq/cli.rb +141 -206
- data/lib/sidekiq/client.rb +45 -46
- data/lib/sidekiq/delay.rb +5 -6
- data/lib/sidekiq/exception_handler.rb +10 -12
- data/lib/sidekiq/extensions/action_mailer.rb +10 -20
- data/lib/sidekiq/extensions/active_record.rb +9 -7
- data/lib/sidekiq/extensions/class_methods.rb +9 -7
- data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
- data/lib/sidekiq/fetch.rb +5 -6
- data/lib/sidekiq/job_logger.rb +37 -7
- data/lib/sidekiq/job_retry.rb +55 -57
- data/lib/sidekiq/launcher.rb +59 -51
- data/lib/sidekiq/logger.rb +69 -0
- data/lib/sidekiq/manager.rb +7 -9
- data/lib/sidekiq/middleware/chain.rb +3 -2
- data/lib/sidekiq/middleware/i18n.rb +5 -7
- data/lib/sidekiq/monitor.rb +148 -0
- data/lib/sidekiq/paginator.rb +11 -12
- data/lib/sidekiq/processor.rb +68 -58
- data/lib/sidekiq/rails.rb +24 -29
- data/lib/sidekiq/redis_connection.rb +31 -37
- data/lib/sidekiq/scheduled.rb +17 -19
- data/lib/sidekiq/testing.rb +22 -23
- data/lib/sidekiq/testing/inline.rb +2 -1
- data/lib/sidekiq/util.rb +17 -14
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web.rb +41 -49
- data/lib/sidekiq/web/action.rb +14 -10
- data/lib/sidekiq/web/application.rb +61 -58
- data/lib/sidekiq/web/helpers.rb +72 -66
- data/lib/sidekiq/web/router.rb +17 -14
- data/lib/sidekiq/worker.rb +134 -102
- data/sidekiq.gemspec +16 -18
- data/web/assets/javascripts/dashboard.js +2 -21
- data/web/assets/stylesheets/bootstrap.css +1 -1
- data/web/locales/ja.yml +2 -1
- data/web/views/queues.erb +1 -1
- metadata +31 -26
- data/.travis.yml +0 -14
- data/bin/sidekiqctl +0 -237
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
data/lib/sidekiq/job_logger.rb
CHANGED
@@ -1,25 +1,55 @@
 # frozen_string_literal: true
+
 module Sidekiq
   class JobLogger
+    def initialize(logger = Sidekiq.logger)
+      @logger = logger
+    end
 
     def call(item, queue)
       start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-      logger.info("start")
+      @logger.info("start")
+
       yield
-      logger.info("done: #{elapsed(start)} sec")
+
+      with_elapsed_time_context(start) do
+        @logger.info("done")
+      end
     rescue Exception
-      logger.info("fail: #{elapsed(start)} sec")
+      with_elapsed_time_context(start) do
+        @logger.info("fail")
+      end
+
       raise
     end
 
+    def with_job_hash_context(job_hash, &block)
+      @logger.with_context(job_hash_context(job_hash), &block)
+    end
+
+    def job_hash_context(job_hash)
+      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+      # attribute to expose the underlying thing.
+      h = {
+        class: job_hash["wrapped"] || job_hash["class"],
+        jid: job_hash["jid"],
+      }
+      h[:bid] = job_hash["bid"] if job_hash["bid"]
+      h
+    end
+
+    def with_elapsed_time_context(start, &block)
+      @logger.with_context(elapsed_time_context(start), &block)
+    end
+
+    def elapsed_time_context(start)
+      {elapsed: elapsed(start).to_s}
+    end
+
     private
 
     def elapsed(start)
       (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
     end
-
-    def logger
-      Sidekiq.logger
-    end
   end
 end
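The JobLogger rewrite swaps the interpolated "done: 0.003 sec" messages for structured logging context: elapsed time and job identifiers become context hashes handed to the logger rather than message text. A minimal sketch of how the new pieces compose, assuming only what this diff and the new `lib/sidekiq/logger.rb` provide (`with_context` merges a hash into every line logged inside its block); the job payload below is hypothetical:

```ruby
require "sidekiq"
require "sidekiq/job_logger"

# Hypothetical job payload; "wrapped" would name the ActiveJob class if present.
job_hash = {"class" => "HardWorker", "jid" => "b4a577edbccf1d80"}
job_logger = Sidekiq::JobLogger.new # defaults to Sidekiq.logger

# The "start"/"done"/"fail" lines now carry class/jid (and elapsed) as
# logger context instead of formatting them into the message string.
job_logger.with_job_hash_context(job_hash) do
  job_logger.call(job_hash, "default") do
    # ... perform the job ...
  end
end
```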
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -1,6 +1,7 @@
 # frozen_string_literal: true
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
+
+require "sidekiq/scheduled"
+require "sidekiq/api"
 
 module Sidekiq
   ##
@@ -56,7 +57,8 @@ module Sidekiq
   #   end
   #
   class JobRetry
-    class Skip < ::RuntimeError; end
+    class Handled < ::RuntimeError; end
+    class Skip < Handled; end
 
     include Sidekiq::Util
 
@@ -71,7 +73,7 @@ module Sidekiq
     # require the worker to be instantiated.
     def global(msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -80,22 +82,19 @@ module Sidekiq
      # ignore, will be pushed back onto queue during hard_shutdown
      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry']
+      if msg["retry"]
        attempt_retry(nil, msg, queue, e)
      else
        Sidekiq.death_handlers.each do |handler|
-          begin
-            handler.call(msg, e)
-          rescue => handler_ex
-            handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
-          end
+          handler.call(msg, e)
+        rescue => handler_ex
+          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
        end
      end
 
-      raise Skip
+      raise Handled
     end
 
-
     # The local retry support means that any errors that occur within
     # this block can be associated with the given worker instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
@@ -106,7 +105,7 @@ module Sidekiq
     # calling the handle_exception handlers.
     def local(worker, msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -115,11 +114,11 @@ module Sidekiq
      # ignore, will be pushed back onto queue during hard_shutdown
      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry'].nil?
-        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      if msg["retry"].nil?
+        msg["retry"] = worker.class.get_sidekiq_options["retry"]
      end
 
-      raise e unless msg['retry']
+      raise e unless msg["retry"]
      attempt_retry(worker, msg, queue, e)
      # We've handled this error associated with this job, don't
      # need to handle it at the global level
@@ -132,48 +131,42 @@ module Sidekiq
     # instantiate the worker instance. All access must be guarded and
     # best effort.
     def attempt_retry(worker, msg, queue, exception)
-      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
-      msg['queue'] = if msg['retry_queue']
-        msg['retry_queue']
-      else
-        queue
-      end
+      msg["queue"] = (msg["retry_queue"] || queue)
 
-      # App code can stuff all sorts of crazy binary data into the error message
-      # that won't convert to JSON.
-      m = exception.message.to_s[0, 10_000]
+      m = exception_message(exception)
       if m.respond_to?(:scrub!)
         m.force_encoding("utf-8")
         m.scrub!
       end
 
-      msg['error_message'] = m
-      msg['error_class'] = exception.class.name
-      count = if msg['retry_count']
-        msg['retried_at'] = Time.now.to_f
-        msg['retry_count'] += 1
+      msg["error_message"] = m
+      msg["error_class"] = exception.class.name
+      count = if msg["retry_count"]
+        msg["retried_at"] = Time.now.to_f
+        msg["retry_count"] += 1
       else
-        msg['failed_at'] = Time.now.to_f
-        msg['retry_count'] = 0
+        msg["failed_at"] = Time.now.to_f
+        msg["retry_count"] = 0
       end
 
-      if msg['backtrace'] == true
-        msg['error_backtrace'] = exception.backtrace
-      elsif !msg['backtrace']
+      if msg["backtrace"] == true
+        msg["error_backtrace"] = exception.backtrace
+      elsif !msg["backtrace"]
         # do nothing
-      elsif msg['backtrace'].to_i != 0
-        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      elsif msg["backtrace"].to_i != 0
+        msg["error_backtrace"] = exception.backtrace[0...msg["backtrace"].to_i]
       end
 
       if count < max_retry_attempts
         delay = delay_for(worker, count, exception)
         # Logging here can break retries if the logging device raises ENOSPC #3979
-        #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
         payload = Sidekiq.dump_json(msg)
         Sidekiq.redis do |conn|
-          conn.zadd('retry', retry_at.to_s, payload)
+          conn.zadd("retry", retry_at.to_s, payload)
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
@@ -183,25 +176,23 @@ module Sidekiq
 
     def retries_exhausted(worker, msg, exception)
       begin
-        block = worker && worker.sidekiq_retries_exhausted_block
-        block.call(msg, exception) if block
+        block = worker&.sidekiq_retries_exhausted_block
+        block&.call(msg, exception)
       rescue => e
-        handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
       Sidekiq.death_handlers.each do |handler|
-        begin
-          handler.call(msg, exception)
-        rescue => e
-          handle_exception(e, { context: "Error calling death handler", job: msg })
-        end
+        handler.call(msg, exception)
+      rescue => e
+        handle_exception(e, {context: "Error calling death handler", job: msg})
       end
 
-      send_to_morgue(msg) unless msg['dead'] == false
+      send_to_morgue(msg) unless msg["dead"] == false
     end
 
     def send_to_morgue(msg)
-      logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
       DeadSet.new.kill(payload, notify_failure: false)
     end
@@ -215,7 +206,7 @@ module Sidekiq
     end
 
     def delay_for(worker, count, exception)
-      if worker && worker.sidekiq_retry_in_block
+      if worker&.sidekiq_retry_in_block
         custom_retry_in = retry_in(worker, count, exception).to_i
         return custom_retry_in if custom_retry_in > 0
       end
@@ -224,16 +215,14 @@ module Sidekiq
 
     # delayed_job uses the same basic formula
     def seconds_to_delay(count)
-      (count ** 4) + 15 + (rand(30) * (count + 1))
+      (count**4) + 15 + (rand(30) * (count + 1))
     end
 
     def retry_in(worker, count, exception)
-      begin
-        worker.sidekiq_retry_in_block.call(count, exception)
-      rescue Exception => e
-        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-        nil
-      end
+      worker.sidekiq_retry_in_block.call(count, exception)
+    rescue Exception => e
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      nil
     end
 
     def exception_caused_by_shutdown?(e, checked_causes = [])
@@ -247,5 +236,14 @@ module Sidekiq
       exception_caused_by_shutdown?(e.cause, checked_causes)
     end
 
+    # Extract message from exception.
+    # Set a default if the message raises an error
+    def exception_message(exception)
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      exception.message.to_s[0, 10_000]
+    rescue
+      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
+    end
   end
 end
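Two behavioral notes fall out of this diff: `Skip` now subclasses the new `Handled` error, so existing `raise Sidekiq::JobRetry::Skip` code keeps working, and the death-handler and `retry_in` paths drop their `begin`/`end` wrappers in favor of Ruby 2.5+ block-level `rescue`. The default backoff formula is only reformatted, not changed; a quick sketch of the delays it produces, ignoring the `rand(30) * (count + 1)` jitter term:

```ruby
# Fixed part of JobRetry#seconds_to_delay: count**4 + 15 seconds.
def base_delay(count)
  (count**4) + 15
end

[0, 1, 5, 10, 20, 24].each { |n| puts "retry ##{n}: ~#{base_delay(n)}s" }
# retry #0:  ~15s
# retry #1:  ~16s
# retry #5:  ~640s    (~11 minutes)
# retry #10: ~10015s  (~2.8 hours)
# retry #20: ~160015s (~1.9 days)
# retry #24: ~331791s (~3.8 days)
```

With the default of 25 retries, a job that keeps failing is retried over roughly three weeks before `retries_exhausted` fires and the job lands in the morgue (unless `"dead" == false`).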
data/lib/sidekiq/launcher.rb
CHANGED
@@ -1,19 +1,25 @@
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/fetch"
+require "sidekiq/scheduled"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
   class Launcher
     include Util
 
-    attr_accessor :manager, :poller, :fetcher
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
-    STATS_TTL = 5 * 365 * 24 * 60 * 60
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "stopping" if me.stopping? },
+    ]
+
+    attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
       @manager = Sidekiq::Manager.new(options)
@@ -62,10 +68,30 @@ module Sidekiq
 
     private unless $TESTING
 
+    def start_heartbeat
+      loop do
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem("processes", identity)
+          conn.del("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
     def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map { |x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
 
       ❤
     end
@@ -73,6 +99,7 @@ module Sidekiq
     def ❤
       key = identity
       fails = procd = 0
+
      begin
        fails = Processor::FAILURE.reset
        procd = Processor::PROCESSED.reset
@@ -80,6 +107,7 @@ module Sidekiq
 
        workers_key = "#{key}:workers"
        nowdate = Time.now.utc.strftime("%Y-%m-%d")
+
        Sidekiq.redis do |conn|
          conn.multi do
            conn.incrby("stat:processed", procd)
@@ -97,24 +125,27 @@ module Sidekiq
            conn.expire(workers_key, 60)
          end
        end
+
        fails = procd = 0
 
-        _, exists, _, _, msg = Sidekiq.redis do |conn|
-          conn.multi do
-            conn.sadd('processes', key)
+        _, exists, _, _, msg = Sidekiq.redis { |conn|
+          res = conn.multi {
+            conn.sadd("processes", key)
            conn.exists(key)
-            conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
+            conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
-          end
-        end
+          }
+
+          res
+        }
 
        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-        fire_event(:heartbeat) if !exists
+        fire_event(:heartbeat) unless exists
 
        return unless msg
 
-        ::Process.kill(msg, $$)
+        ::Process.kill(msg, ::Process.pid)
      rescue => e
        # ignore all redis/network issues
        logger.error("heartbeat: #{e.message}")
@@ -124,25 +155,17 @@ module Sidekiq
      end
     end
 
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
-      end
-      Sidekiq.logger.info("Heartbeat stopping...")
-    end
-
     def to_data
       @data ||= begin
         {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
+          "hostname" => hostname,
+          "started_at" => Time.now.to_f,
+          "pid" => ::Process.pid,
+          "tag" => @options[:tag] || "",
+          "concurrency" => @options[:concurrency],
+          "queues" => @options[:queues].uniq,
+          "labels" => @options[:labels],
+          "identity" => identity,
         }
       end
     end
@@ -154,20 +177,5 @@ module Sidekiq
         Sidekiq.dump_json(to_data)
       end
     end
-
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
-    end
-
   end
 end
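The proctitle fragments that previously lived in `Sidekiq::CLI::PROCTITLES` move onto the Launcher, and the heartbeat now builds `$0` in a single pass. A self-contained sketch of the mechanism, with literal stand-ins for `Sidekiq::VERSION`, the busy count, and `stopping?` (which come from the live process in the real code):

```ruby
# Each proc returns a title fragment or nil; nils are compacted away,
# so "stopping" appears only while the launcher is shutting down.
PROCTITLES = [
  proc { "sidekiq" },
  proc { "6.0.0" },                                         # stand-in for Sidekiq::VERSION
  proc { |me, data| data["tag"] },
  proc { |me, data| "[3 of #{data["concurrency"]} busy]" }, # stand-in for WORKER_STATE.size
  proc { |me, data| "stopping" if me },                     # stand-in for me.stopping?
]

data = {"tag" => "myapp", "concurrency" => 10}
$0 = PROCTITLES.map { |p| p.call(false, data) }.compact.join(" ")
puts $0 # => "sidekiq 6.0.0 myapp [3 of 10 busy]"
```

Because the list is a constant on the Launcher, extensions can append their own proc to add a fragment to the title without monkey-patching the heartbeat itself.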