sidekiq 4.2.10 → 5.0.0.beta1
Potentially problematic release.
- checksums.yaml +4 -4
- data/.github/issue_template.md +1 -6
- data/.gitignore +1 -0
- data/5.0-Upgrade.md +52 -0
- data/Changes.md +14 -6
- data/Ent-Changes.md +1 -2
- data/Pro-Changes.md +1 -19
- data/README.md +2 -2
- data/bin/sidekiqctl +1 -1
- data/bin/sidekiqload +14 -19
- data/lib/sidekiq.rb +3 -12
- data/lib/sidekiq/api.rb +30 -31
- data/lib/sidekiq/cli.rb +12 -5
- data/lib/sidekiq/delay.rb +21 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +7 -1
- data/lib/sidekiq/job_logger.rb +36 -0
- data/lib/sidekiq/job_retry.rb +232 -0
- data/lib/sidekiq/launcher.rb +1 -7
- data/lib/sidekiq/middleware/server/active_record.rb +9 -0
- data/lib/sidekiq/processor.rb +63 -29
- data/lib/sidekiq/rails.rb +2 -65
- data/lib/sidekiq/testing.rb +0 -6
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/application.rb +1 -1
- data/lib/sidekiq/web/helpers.rb +1 -2
- data/sidekiq.gemspec +2 -2
- data/test/config.yml +9 -0
- data/test/env_based_config.yml +11 -0
- data/test/fake_env.rb +1 -0
- data/test/fixtures/en.yml +2 -0
- data/test/helper.rb +98 -0
- data/test/test_actors.rb +138 -0
- data/test/test_api.rb +529 -0
- data/test/test_cli.rb +418 -0
- data/test/test_client.rb +266 -0
- data/test/test_exception_handler.rb +56 -0
- data/test/test_extensions.rb +115 -0
- data/test/test_fetch.rb +50 -0
- data/test/test_launcher.rb +92 -0
- data/test/test_logging.rb +35 -0
- data/test/test_manager.rb +50 -0
- data/test/test_middleware.rb +158 -0
- data/test/test_processor.rb +266 -0
- data/test/test_rails.rb +22 -0
- data/test/test_redis_connection.rb +132 -0
- data/test/test_retry.rb +335 -0
- data/test/test_retry_exhausted.rb +149 -0
- data/test/test_scheduled.rb +115 -0
- data/test/test_scheduling.rb +58 -0
- data/test/test_sidekiq.rb +107 -0
- data/test/test_testing.rb +135 -0
- data/test/test_testing_fake.rb +352 -0
- data/test/test_testing_inline.rb +93 -0
- data/test/test_util.rb +13 -0
- data/test/test_web.rb +638 -0
- data/test/test_web_auth.rb +54 -0
- data/test/test_web_helpers.rb +54 -0
- data/test/test_web_sessions.rb +67 -0
- data/web/assets/javascripts/dashboard.js +1 -1
- data/web/views/_job_info.erb +1 -1
- data/web/views/dashboard.erb +2 -2
- data/web/views/morgue.erb +0 -2
- data/web/views/queue.erb +1 -1
- data/web/views/retry.erb +1 -1
- metadata +73 -8
- data/lib/sidekiq/middleware/server/logging.rb +0 -31
- data/lib/sidekiq/middleware/server/retry_jobs.rb +0 -205
- data/web/locales/fa.yml +0 -79
data/lib/sidekiq/cli.rb
CHANGED
@@ -43,6 +43,10 @@ module Sidekiq
       write_pid
     end
 
+    def jruby?
+      defined?(::JRUBY_VERSION)
+    end
+
     # Code within this method is not tested because it alters
     # global process state irreversibly. PRs which improve the
     # test coverage of Sidekiq::CLI are welcomed.
@@ -51,8 +55,14 @@ module Sidekiq
       print_banner
 
       self_read, self_write = IO.pipe
+      sigs = %w(INT TERM TTIN TSTP)
+      # USR1 and USR2 don't work on the JVM
+      if !jruby?
+        sigs << 'USR1'
+        sigs << 'USR2'
+      end
 
-      %w(INT TERM USR1 USR2 TTIN TSTP).each do |sig|
+      sigs.each do |sig|
        begin
          trap sig do
            self_write.puts(sig)
@@ -230,9 +240,7 @@ module Sidekiq
       if File.directory?(options[:require])
         require 'rails'
         if ::Rails::VERSION::MAJOR < 4
-          require 'sidekiq/rails'
-          require File.expand_path("#{options[:require]}/config/environment.rb")
-          ::Rails.application.eager_load!
+          raise "Sidekiq no longer supports this version of Rails"
         elsif ::Rails::VERSION::MAJOR == 4
           # Painful contortions, see 1791 for discussion
           # No autoloading, we want to force eager load for everything.
@@ -243,7 +251,6 @@ module Sidekiq
           require 'sidekiq/rails'
           require File.expand_path("#{options[:require]}/config/environment.rb")
         else
-          # Rails 5+ && development mode, use Reloader
           require 'sidekiq/rails'
           require File.expand_path("#{options[:require]}/config/environment.rb")
         end
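The new jruby? guard trims the trapped signal list because the JVM cannot trap USR1/USR2. A small sketch (pid file path hypothetical) of driving a running Sidekiq through the signals registered above; on JRuby only the first four are available:

pid = File.read('tmp/pids/sidekiq.pid').to_i  # wherever your pid file lives
Process.kill('TTIN', pid)  # dump worker thread backtraces to the log
Process.kill('TSTP', pid)  # quiet: stop fetching new jobs
Process.kill('TERM', pid)  # orderly shutdown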
data/lib/sidekiq/delay.rb
ADDED
@@ -0,0 +1,21 @@
+module Sidekiq
+  module Extensions
+
+    def self.enable_delay!
+      if defined?(::ActiveSupport)
+        ActiveSupport.on_load(:active_record) do
+          require 'sidekiq/extensions/active_record'
+          include Sidekiq::Extensions::ActiveRecord
+        end
+        ActiveSupport.on_load(:action_mailer) do
+          require 'sidekiq/extensions/action_mailer'
+          extend Sidekiq::Extensions::ActionMailer
+        end
+      end
+
+      require 'sidekiq/extensions/class_methods'
+      Module.__send__(:include, Sidekiq::Extensions::Klass)
+    end
+
+  end
+end
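Sidekiq 5 stops loading the delay extensions automatically; this new file makes them opt-in. A minimal sketch of re-enabling them in an initializer (enable_delay! is the method defined above; the model name is hypothetical):

# config/initializers/sidekiq.rb
require 'sidekiq'
Sidekiq::Extensions.enable_delay!

# The familiar proxies then work again:
# MyModel.delay.expensive_method(42)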
data/lib/sidekiq/extensions/generic_proxy.rb
CHANGED
@@ -3,6 +3,8 @@ require 'yaml'
 
 module Sidekiq
   module Extensions
+    SIZE_LIMIT = 8_192
+
    class Proxy < BasicObject
      def initialize(performable, target, options={})
        @performable = performable
@@ -17,7 +19,11 @@ module Sidekiq
        # to JSON and then deserialized on the other side back into a
        # Ruby object.
        obj = [@target, name, args]
-        @performable.client_push({ 'class' => @performable, 'args' => [::YAML.dump(obj)] }.merge(@opts))
+        marshalled = ::YAML.dump(obj)
+        if marshalled.size > SIZE_LIMIT
+          ::Sidekiq.logger.warn { "#{@target}.#{name} job argument is #{marshalled.bytesize} bytes, you should refactor it to reduce the size" }
+        end
+        @performable.client_push({ 'class' => @performable, 'args' => [marshalled] }.merge(@opts))
      end
    end
 
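The new SIZE_LIMIT check warns when a delayed call's YAML payload exceeds 8 KB, since oversized serialized arguments bloat Redis. A rough sketch of the measurement the proxy now performs (model and method names hypothetical):

require 'yaml'

args = [(1..5_000).to_a]                                # an oversized argument
payload = YAML.dump(['SomeModel', :some_method, args])
payload.bytesize > 8_192                                # => true, would trigger the warning above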
data/lib/sidekiq/job_logger.rb
ADDED
@@ -0,0 +1,36 @@
+module Sidekiq
+  class JobLogger
+
+    def call(item, queue)
+      Sidekiq::Logging.with_context(log_context(item)) do
+        begin
+          start = Time.now
+          logger.info("start".freeze)
+          yield
+          logger.info("done: #{elapsed(start)} sec")
+        rescue Exception
+          logger.info("fail: #{elapsed(start)} sec")
+          raise
+        end
+      end
+    end
+
+    private
+
+    # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+    # attribute to expose the underlying thing.
+    def log_context(item)
+      klass = item['wrapped'.freeze] || item["class".freeze]
+      "#{klass} JID-#{item['jid'.freeze]}#{" BID-#{item['bid'.freeze]}" if item['bid'.freeze]}"
+    end
+
+    def elapsed(start)
+      (Time.now - start).round(3)
+    end
+
+    def logger
+      Sidekiq.logger
+    end
+  end
+end
+
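JobLogger extracts the per-job start/done/fail logging that previously lived in the Sidekiq::Middleware::Server::Logging middleware (deleted in this release, see the file list above). A minimal sketch of calling it directly with a hand-built job hash:

logger = Sidekiq::JobLogger.new
job = { 'class' => 'HardWorker', 'jid' => 'abc123' }
logger.call(job, 'default') do
  # do the actual work; "start" and "done: N sec" bracket this block,
  # and "fail: N sec" is logged instead if it raises
end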
data/lib/sidekiq/job_retry.rb
ADDED
@@ -0,0 +1,232 @@
+require 'sidekiq/scheduled'
+require 'sidekiq/api'
+
+module Sidekiq
+  ##
+  # Automatically retry jobs that fail in Sidekiq.
+  # Sidekiq's retry support assumes a typical development lifecycle:
+  #
+  #   0. Push some code changes with a bug in it.
+  #   1. Bug causes job processing to fail, Sidekiq's middleware captures
+  #      the job and pushes it onto a retry queue.
+  #   2. Sidekiq retries jobs in the retry queue multiple times with
+  #      an exponential delay, the job continues to fail.
+  #   3. After a few days, a developer deploys a fix. The job is
+  #      reprocessed successfully.
+  #   4. Once retries are exhausted, Sidekiq will give up and move the
+  #      job to the Dead Job Queue (aka morgue) where it must be dealt with
+  #      manually in the Web UI.
+  #   5. After 6 months on the DJQ, Sidekiq will discard the job.
+  #
+  # A job looks like:
+  #
+  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+  #
+  # The 'retry' option also accepts a number (in place of 'true'):
+  #
+  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+  #
+  # The job will be retried this number of times before giving up. (If simply
+  # 'true', Sidekiq retries 25 times)
+  #
+  # We'll add a bit more data to the job to support retries:
+  #
+  # * 'queue' - the queue to use
+  # * 'retry_count' - number of times we've retried so far.
+  # * 'error_message' - the message from the exception
+  # * 'error_class' - the exception class
+  # * 'failed_at' - the first time it failed
+  # * 'retried_at' - the last time it was retried
+  # * 'backtrace' - the number of lines of error backtrace to store
+  #
+  # We don't store the backtrace by default as that can add a lot of overhead
+  # to the job and everyone is using an error service, right?
+  #
+  # The default number of retries is 25 which works out to about 3 weeks
+  # You can change the default maximum number of retries in your initializer:
+  #
+  #   Sidekiq.options[:max_retries] = 7
+  #
+  # or limit the number of retries for a particular worker with:
+  #
+  #   class MyWorker
+  #     include Sidekiq::Worker
+  #     sidekiq_options :retry => 10
+  #   end
+  #
+  class JobRetry
+    class Skip < ::RuntimeError; end
+
+    include Sidekiq::Util
+
+    DEFAULT_MAX_RETRY_ATTEMPTS = 25
+
+    def initialize(options = {})
+      @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+    end
+
+    # The global retry handler requires only the barest of data.
+    # We want to be able to retry as much as possible so we don't
+    # require the worker to be instantiated.
+    def global(msg, queue)
+      yield
+    rescue Skip
+      raise
+    rescue Sidekiq::Shutdown
+      # ignore, will be pushed back onto queue during hard_shutdown
+      raise
+    rescue Exception => e
+      # ignore, will be pushed back onto queue during hard_shutdown
+      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
+
+      raise e unless msg['retry']
+      attempt_retry(nil, msg, queue, e)
+    end
+
+
+    # The local retry support means that any errors that occur within
+    # this block can be associated with the given worker instance.
+    # This is required to support the `sidekiq_retries_exhausted` block.
+    def local(worker, msg, queue)
+      yield
+    rescue Skip
+      raise
+    rescue Sidekiq::Shutdown
+      # ignore, will be pushed back onto queue during hard_shutdown
+      raise
+    rescue Exception => e
+      # ignore, will be pushed back onto queue during hard_shutdown
+      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
+
+      if msg['retry'] == nil
+        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      end
+
+      raise e unless msg['retry']
+      attempt_retry(worker, msg, queue, e)
+      # We've handled this error associated with this job, don't
+      # need to handle it at the global level
+      raise Skip
+    end
+
+    private
+
+    # Note that +worker+ can be nil here if an error is raised before we can
+    # instantiate the worker instance. All access must be guarded and
+    # best effort.
+    def attempt_retry(worker, msg, queue, exception)
+      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+
+      msg['queue'] = if msg['retry_queue']
+        msg['retry_queue']
+      else
+        queue
+      end
+
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      m = exception.message.to_s[0, 10_000]
+      if m.respond_to?(:scrub!)
+        m.force_encoding("utf-8")
+        m.scrub!
+      end
+
+      msg['error_message'] = m
+      msg['error_class'] = exception.class.name
+      count = if msg['retry_count']
+        msg['retried_at'] = Time.now.to_f
+        msg['retry_count'] += 1
+      else
+        msg['failed_at'] = Time.now.to_f
+        msg['retry_count'] = 0
+      end
+
+      if msg['backtrace'] == true
+        msg['error_backtrace'] = exception.backtrace
+      elsif !msg['backtrace']
+        # do nothing
+      elsif msg['backtrace'].to_i != 0
+        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      end
+
+      if count < max_retry_attempts
+        delay = delay_for(worker, count, exception)
+        logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        retry_at = Time.now.to_f + delay
+        payload = Sidekiq.dump_json(msg)
+        Sidekiq.redis do |conn|
+          conn.zadd('retry', retry_at.to_s, payload)
+        end
+      else
+        # Goodbye dear message, you (re)tried your best I'm sure.
+        retries_exhausted(worker, msg, exception)
+      end
+
+      raise exception
+    end
+
+    def retries_exhausted(worker, msg, exception)
+      logger.debug { "Retries exhausted for job" }
+      begin
+        block = worker && worker.sidekiq_retries_exhausted_block || Sidekiq.default_retries_exhausted
+        block.call(msg, exception) if block
+      rescue => e
+        handle_exception(e, { context: "Error calling retries_exhausted for #{msg['class']}", job: msg })
+      end
+
+      send_to_morgue(msg) unless msg['dead'] == false
+    end
+
+    def send_to_morgue(msg)
+      Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      payload = Sidekiq.dump_json(msg)
+      now = Time.now.to_f
+      Sidekiq.redis do |conn|
+        conn.multi do
+          conn.zadd('dead', now, payload)
+          conn.zremrangebyscore('dead', '-inf', now - DeadSet.timeout)
+          conn.zremrangebyrank('dead', 0, -DeadSet.max_jobs)
+        end
+      end
+    end
+
+    def retry_attempts_from(msg_retry, default)
+      if msg_retry.is_a?(Integer)
+        msg_retry
+      else
+        default
+      end
+    end
+
+    def delay_for(worker, count, exception)
+      worker && worker.sidekiq_retry_in_block? && retry_in(worker, count, exception) || seconds_to_delay(count)
+    end
+
+    # delayed_job uses the same basic formula
+    def seconds_to_delay(count)
+      (count ** 4) + 15 + (rand(30)*(count+1))
+    end
+
+    def retry_in(worker, count, exception)
+      begin
+        worker.sidekiq_retry_in_block.call(count, exception).to_i
+      rescue Exception => e
+        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
+        nil
+      end
+    end
+
+    def exception_caused_by_shutdown?(e, checked_causes = [])
+      # In Ruby 2.1.0 only, check if exception is a result of shutdown.
+      return false unless defined?(e.cause)
+
+      # Handle circular causes
+      checked_causes << e.object_id
+      return false if checked_causes.include?(e.cause.object_id)
+
+      e.cause.instance_of?(Sidekiq::Shutdown) ||
+        exception_caused_by_shutdown?(e.cause, checked_causes)
+    end
+
+  end
+end
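The seconds_to_delay formula gives roughly exponential backoff: ignoring the rand(30) * (count + 1) jitter, retry 0 waits 15s, retry 5 about 640s, and retry 24 over 330,000s, which is how 25 retries stretch to about three weeks. A sketch of overriding the schedule per worker, matching the two-argument block that retry_in calls above (worker name hypothetical):

class FlakyApiWorker
  include Sidekiq::Worker
  sidekiq_options retry: 5

  # count is the current retry_count; the return value is the delay in seconds
  sidekiq_retry_in { |count, exception| 10 * (count + 1) }
end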
data/lib/sidekiq/launcher.rb
CHANGED
@@ -61,8 +61,6 @@ module Sidekiq
 
     private unless $TESTING
 
-    JVM_RESERVED_SIGNALS = ['USR1', 'USR2'] # Don't Process#kill if we get these signals via the API
-
    def heartbeat
      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
      results.compact!
@@ -110,11 +108,7 @@ module Sidekiq
 
        return unless msg
 
-        if JVM_RESERVED_SIGNALS.include?(msg)
-          Sidekiq::CLI.instance.handle_signal(msg)
-        else
-          ::Process.kill(msg, $$)
-        end
+        ::Process.kill(msg, $$)
      rescue => e
        # ignore all redis/network issues
        logger.error("heartbeat: #{e.message}")
data/lib/sidekiq/middleware/server/active_record.rb
CHANGED
@@ -2,6 +2,15 @@ module Sidekiq
   module Middleware
     module Server
       class ActiveRecord
+
+        def initialize
+          # With Rails 5+ we must use the Reloader **always**.
+          # The reloader handles code loading and db connection management.
+          if ::Rails::VERSION::MAJOR >= 5
+            raise ArgumentError, "Rails 5 no longer needs or uses the ActiveRecord middleware."
+          end
+        end
+
        def call(*args)
          yield
        ensure
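Under Rails 5 the Reloader (wired up in Processor#dispatch below) owns database connection management, so this middleware now refuses to be instantiated. Apps that add it explicitly in an initializer should delete that line; a sketch using the standard middleware-chain API:

Sidekiq.configure_server do |config|
  config.server_middleware do |chain|
    # on Rails 5+, constructing this middleware raises ArgumentError
    chain.remove Sidekiq::Middleware::Server::ActiveRecord
  end
end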
data/lib/sidekiq/processor.rb
CHANGED
@@ -1,6 +1,8 @@
 # frozen_string_literal: true
 require 'sidekiq/util'
 require 'sidekiq/fetch'
+require 'sidekiq/job_logger'
+require 'sidekiq/job_retry'
 require 'thread'
 require 'concurrent/map'
 require 'concurrent/atomic/atomic_fixnum'
@@ -37,7 +39,8 @@ module Sidekiq
      @thread = nil
      @strategy = (mgr.options[:fetch] || Sidekiq::BasicFetch).new(mgr.options)
      @reloader = Sidekiq.options[:reloader]
-      @executor = Sidekiq.options[:executor]
+      @logging = Sidekiq::JobLogger.new
+      @retrier = Sidekiq::JobRetry.new
    end
 
    def terminate(wait=false)
@@ -116,32 +119,59 @@ module Sidekiq
      nil
    end
 
+    def dispatch(job_hash, queue)
+      # since middleware can mutate the job hash
+      # we clone here so we report the original
+      # job structure to the Web UI
+      pristine = cloned(job_hash)
+
+      @retrier.global(job_hash, queue) do
+        @logging.call(job_hash, queue) do
+          stats(pristine, queue) do
+            # Rails 5 requires a Reloader to wrap code execution. In order to
+            # constantize the worker and instantiate an instance, we have to call
+            # the Reloader. It handles code loading, db connection management, etc.
+            # Effectively this block denotes a "unit of work" to Rails.
+            @reloader.call do
+              klass = job_hash['class'.freeze].constantize
+              worker = klass.new
+              worker.jid = job_hash['jid'.freeze]
+              @retrier.local(worker, job_hash, queue) do
+                yield worker
+              end
+            end
+          end
+        end
+      end
+    end
+
    def process(work)
      jobstr = work.job
      queue = work.queue_name
 
      ack = false
      begin
-        job_hash = Sidekiq.load_json(jobstr)
-        @reloader.call do
-          klass = job_hash['class'.freeze].constantize
-          worker = klass.new
-          worker.jid = job_hash['jid'.freeze]
-
-          stats(worker, job_hash, queue) do
-            Sidekiq::Logging.with_context(log_context(job_hash)) do
-              ack = true
-              Sidekiq.server_middleware.invoke(worker, job_hash, queue) do
-                @executor.call do
-                  # Only ack if we either attempted to start this job or
-                  # successfully completed it. This prevents us from
-                  # losing jobs if a middleware raises an exception before yielding
-                  execute_job(worker, cloned(job_hash['args'.freeze]))
-                end
-              end
-            end
-          end
+        # Treat malformed JSON like a process crash -- don't acknowledge it.
+        # * In Sidekiq, the error will be logged but job discarded.
+        # * In Sidekiq Pro, the error will be logged and the job retried when
+        #   it is recovered by the reliability algorithm. The job may act like
+        #   a poison pill and never execute until manually removed but job loss
+        #   is considered worse.
+        job_hash = nil
+        begin
+          job_hash = Sidekiq.load_json(jobstr)
+        rescue => ex
+          Sidekiq.logger.error { "Pushing job to dead queue due to invalid JSON: #{ex}" }
+          send_to_morgue(jobstr)
          ack = true
+          raise
+        end
+
+        ack = true
+        dispatch(job_hash, queue) do |worker|
+          Sidekiq.server_middleware.invoke(worker, job_hash, queue) do
+            execute_job(worker, cloned(job_hash['args'.freeze]))
+          end
        end
      rescue Sidekiq::Shutdown
        # Had to force kill this job because it didn't finish
@@ -156,11 +186,15 @@ module Sidekiq
      end
    end
 
-    # If we're using a wrapper class, like ActiveJob, use the "wrapped"
-    # attribute to expose the underlying thing.
-    def log_context(item)
-      klass = item['wrapped'.freeze] || item['class'.freeze]
-      "#{klass} JID-#{item['jid'.freeze]}#{" BID-#{item['bid'.freeze]}" if item['bid'.freeze]}"
+    def send_to_morgue(msg)
+      now = Time.now.to_f
+      Sidekiq.redis do |conn|
+        conn.multi do
+          conn.zadd('dead', now, msg)
+          conn.zremrangebyscore('dead', '-inf', now - DeadSet.timeout)
+          conn.zremrangebyrank('dead', 0, -DeadSet.max_jobs)
+        end
+      end
    end
 
    def execute_job(worker, cloned_args)
@@ -175,9 +209,9 @@ module Sidekiq
    PROCESSED = Concurrent::AtomicFixnum.new
    FAILURE = Concurrent::AtomicFixnum.new
 
-    def stats(worker, job_hash, queue)
+    def stats(job_hash, queue)
      tid = thread_identity
-      WORKER_STATE[tid] = {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i}
+      WORKER_STATE[tid] = {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i }
 
      begin
        yield
@@ -193,8 +227,8 @@ module Sidekiq
    # Deep clone the arguments passed to the worker so that if
    # the job fails, what is pushed back onto Redis hasn't
    # been mutated by the worker.
-    def cloned(ary)
-      Marshal.load(Marshal.dump(ary))
+    def cloned(thing)
+      Marshal.load(Marshal.dump(thing))
    end
 
  end