sidekiq 4.2.10 → 5.0.0
Potentially problematic release.
- checksums.yaml +4 -4
- data/.gitignore +1 -0
- data/5.0-Upgrade.md +56 -0
- data/Changes.md +24 -1
- data/Ent-Changes.md +3 -2
- data/Pro-Changes.md +6 -2
- data/README.md +2 -2
- data/bin/sidekiqctl +1 -1
- data/bin/sidekiqload +3 -8
- data/lib/sidekiq/api.rb +33 -14
- data/lib/sidekiq/cli.rb +12 -5
- data/lib/sidekiq/client.rb +15 -13
- data/lib/sidekiq/delay.rb +21 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +7 -1
- data/lib/sidekiq/job_logger.rb +27 -0
- data/lib/sidekiq/job_retry.rb +235 -0
- data/lib/sidekiq/launcher.rb +1 -7
- data/lib/sidekiq/middleware/server/active_record.rb +9 -0
- data/lib/sidekiq/processor.rb +68 -31
- data/lib/sidekiq/rails.rb +2 -65
- data/lib/sidekiq/redis_connection.rb +1 -1
- data/lib/sidekiq/testing.rb +1 -1
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/action.rb +0 -4
- data/lib/sidekiq/web/application.rb +6 -11
- data/lib/sidekiq/web/helpers.rb +9 -1
- data/lib/sidekiq/worker.rb +34 -11
- data/lib/sidekiq.rb +4 -13
- data/sidekiq.gemspec +1 -1
- data/web/assets/javascripts/dashboard.js +10 -12
- data/web/assets/stylesheets/application-rtl.css +246 -0
- data/web/assets/stylesheets/application.css +336 -4
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/locales/ar.yml +80 -0
- data/web/locales/fa.yml +1 -0
- data/web/locales/he.yml +79 -0
- data/web/locales/ur.yml +80 -0
- data/web/views/_footer.erb +1 -1
- data/web/views/_nav.erb +1 -1
- data/web/views/_paging.erb +1 -1
- data/web/views/busy.erb +4 -4
- data/web/views/dashboard.erb +1 -1
- data/web/views/layout.erb +10 -1
- data/web/views/morgue.erb +4 -4
- data/web/views/queue.erb +7 -7
- data/web/views/retries.erb +5 -5
- data/web/views/scheduled.erb +2 -2
- metadata +15 -8
- data/lib/sidekiq/middleware/server/logging.rb +0 -31
- data/lib/sidekiq/middleware/server/retry_jobs.rb +0 -205
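
The headline change in this diff is the pair of deletions at the end of the list: the `Logging` and `RetryJobs` server middlewares are removed, with retry handling folded into core as `data/lib/sidekiq/job_retry.rb` (+235) and job logging as `data/lib/sidekiq/job_logger.rb` (+27). Below is a minimal before/after sketch of the global retry-cap configuration, assuming the 5.0 `Sidekiq::JobRetry` reads `:max_retries` from the server options; see `5.0-Upgrade.md`, added in this release, for the authoritative migration steps:

```ruby
# Sidekiq 4.x: the retry cap was an option on the removable middleware.
Sidekiq.configure_server do |config|
  config.server_middleware do |chain|
    chain.add Sidekiq::Middleware::Server::RetryJobs, max_retries: 7
  end
end

# Sidekiq 5.0 (sketch): retry handling is built in, so the middleware
# registration goes away and the cap becomes a server option.
Sidekiq.configure_server do |config|
  config.options[:max_retries] = 7
end
```

Per-worker controls (`sidekiq_options retry: 10`, `sidekiq_retry_in`, `sidekiq_retries_exhausted`) remain part of the Worker DSL. The full deleted `RetryJobs` middleware follows.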
data/lib/sidekiq/middleware/server/retry_jobs.rb

```diff
@@ -1,205 +0,0 @@
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
-
-module Sidekiq
-  module Middleware
-    module Server
-      ##
-      # Automatically retry jobs that fail in Sidekiq.
-      # Sidekiq's retry support assumes a typical development lifecycle:
-      #
-      #   0. Push some code changes with a bug in it.
-      #   1. Bug causes job processing to fail, Sidekiq's middleware captures
-      #      the job and pushes it onto a retry queue.
-      #   2. Sidekiq retries jobs in the retry queue multiple times with
-      #      an exponential delay, the job continues to fail.
-      #   3. After a few days, a developer deploys a fix. The job is
-      #      reprocessed successfully.
-      #   4. Once retries are exhausted, Sidekiq will give up and move the
-      #      job to the Dead Job Queue (aka morgue) where it must be dealt with
-      #      manually in the Web UI.
-      #   5. After 6 months on the DJQ, Sidekiq will discard the job.
-      #
-      # A job looks like:
-      #
-      #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
-      #
-      # The 'retry' option also accepts a number (in place of 'true'):
-      #
-      #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
-      #
-      # The job will be retried this number of times before giving up. (If simply
-      # 'true', Sidekiq retries 25 times)
-      #
-      # We'll add a bit more data to the job to support retries:
-      #
-      # * 'queue' - the queue to use
-      # * 'retry_count' - number of times we've retried so far.
-      # * 'error_message' - the message from the exception
-      # * 'error_class' - the exception class
-      # * 'failed_at' - the first time it failed
-      # * 'retried_at' - the last time it was retried
-      # * 'backtrace' - the number of lines of error backtrace to store
-      #
-      # We don't store the backtrace by default as that can add a lot of overhead
-      # to the job and everyone is using an error service, right?
-      #
-      # The default number of retry attempts is 25 which works out to about 3 weeks
-      # of retries. You can pass a value for the max number of retry attempts when
-      # adding the middleware using the options hash:
-      #
-      #   Sidekiq.configure_server do |config|
-      #     config.server_middleware do |chain|
-      #       chain.add Sidekiq::Middleware::Server::RetryJobs, :max_retries => 7
-      #     end
-      #   end
-      #
-      # or limit the number of retries for a particular worker with:
-      #
-      #    class MyWorker
-      #      include Sidekiq::Worker
-      #      sidekiq_options :retry => 10
-      #    end
-      #
-      class RetryJobs
-        include Sidekiq::Util
-
-        DEFAULT_MAX_RETRY_ATTEMPTS = 25
-
-        def initialize(options = {})
-          @max_retries = options.fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
-        end
-
-        def call(worker, msg, queue)
-          yield
-        rescue Sidekiq::Shutdown
-          # ignore, will be pushed back onto queue during hard_shutdown
-          raise
-        rescue Exception => e
-          # ignore, will be pushed back onto queue during hard_shutdown
-          raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
-
-          raise e unless msg['retry']
-          attempt_retry(worker, msg, queue, e)
-        end
-
-        private
-
-        def attempt_retry(worker, msg, queue, exception)
-          max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
-
-          msg['queue'] = if msg['retry_queue']
-            msg['retry_queue']
-          else
-            queue
-          end
-
-          # App code can stuff all sorts of crazy binary data into the error message
-          # that won't convert to JSON.
-          m = exception.message.to_s[0, 10_000]
-          if m.respond_to?(:scrub!)
-            m.force_encoding("utf-8")
-            m.scrub!
-          end
-
-          msg['error_message'] = m
-          msg['error_class'] = exception.class.name
-          count = if msg['retry_count']
-            msg['retried_at'] = Time.now.to_f
-            msg['retry_count'] += 1
-          else
-            msg['failed_at'] = Time.now.to_f
-            msg['retry_count'] = 0
-          end
-
-          if msg['backtrace'] == true
-            msg['error_backtrace'] = exception.backtrace
-          elsif !msg['backtrace']
-            # do nothing
-          elsif msg['backtrace'].to_i != 0
-            msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
-          end
-
-          if count < max_retry_attempts
-            delay = delay_for(worker, count, exception)
-            logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
-            retry_at = Time.now.to_f + delay
-            payload = Sidekiq.dump_json(msg)
-            Sidekiq.redis do |conn|
-              conn.zadd('retry', retry_at.to_s, payload)
-            end
-          else
-            # Goodbye dear message, you (re)tried your best I'm sure.
-            retries_exhausted(worker, msg, exception)
-          end
-
-          raise exception
-        end
-
-        def retries_exhausted(worker, msg, exception)
-          logger.debug { "Retries exhausted for job" }
-          begin
-            block = worker.sidekiq_retries_exhausted_block || Sidekiq.default_retries_exhausted
-            block.call(msg, exception) if block
-          rescue => e
-            handle_exception(e, { context: "Error calling retries_exhausted for #{worker.class}", job: msg })
-          end
-
-          send_to_morgue(msg) unless msg['dead'] == false
-        end
-
-        def send_to_morgue(msg)
-          Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
-          payload = Sidekiq.dump_json(msg)
-          now = Time.now.to_f
-          Sidekiq.redis do |conn|
-            conn.multi do
-              conn.zadd('dead', now, payload)
-              conn.zremrangebyscore('dead', '-inf', now - DeadSet.timeout)
-              conn.zremrangebyrank('dead', 0, -DeadSet.max_jobs)
-            end
-          end
-        end
-
-        def retry_attempts_from(msg_retry, default)
-          if msg_retry.is_a?(Integer)
-            msg_retry
-          else
-            default
-          end
-        end
-
-        def delay_for(worker, count, exception)
-          worker.sidekiq_retry_in_block? && retry_in(worker, count, exception) || seconds_to_delay(count)
-        end
-
-        # delayed_job uses the same basic formula
-        def seconds_to_delay(count)
-          (count ** 4) + 15 + (rand(30)*(count+1))
-        end
-
-        def retry_in(worker, count, exception)
-          begin
-            worker.sidekiq_retry_in_block.call(count, exception).to_i
-          rescue Exception => e
-            handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-            nil
-          end
-        end
-
-        def exception_caused_by_shutdown?(e, checked_causes = [])
-          # In Ruby 2.1.0 only, check if exception is a result of shutdown.
-          return false unless defined?(e.cause)
-
-          # Handle circular causes
-          checked_causes << e.object_id
-          return false if checked_causes.include?(e.cause.object_id)
-
-          e.cause.instance_of?(Sidekiq::Shutdown) ||
-            exception_caused_by_shutdown?(e.cause, checked_causes)
-        end
-
-      end
-    end
-  end
-end
```
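
One numeric claim in the deleted file is worth unpacking: the header comment says 25 retries "works out to about 3 weeks", which follows from `seconds_to_delay`, whose `count ** 4` term dominates. A small illustrative check (not part of the gem), replacing the `rand(30)` jitter with its approximate mean of 15:

```ruby
# Mean retry delay implied by seconds_to_delay, with rand(30) ~ 15.
def mean_delay(count)
  (count ** 4) + 15 + (15 * (count + 1))
end

total = (0...25).sum { |count| mean_delay(count) }
puts "retry 0:  #{mean_delay(0)} s (~30 seconds)"
puts "retry 5:  #{mean_delay(5)} s (~12 minutes)"
puts "retry 24: #{mean_delay(24)} s (~3.8 days)"
puts "total:    #{total} s (~#{(total / 86_400.0).round(1)} days)"
```

Early retries fire within seconds to minutes, while the last few are days apart; the mean delays sum to roughly 1.77 million seconds, about 20 days, hence the "about 3 weeks" in the comment.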