sr-sidekiq 4.1.6
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.gitignore +12 -0
- data/3.0-Upgrade.md +70 -0
- data/4.0-Upgrade.md +50 -0
- data/COMM-LICENSE (sidekiq) +95 -0
- data/Changes.md +1241 -0
- data/Ent-Changes.md +112 -0
- data/Gemfile +29 -0
- data/LICENSE (sidekiq) +9 -0
- data/LICENSE (sr-sidekiq) +5 -0
- data/Pro-2.0-Upgrade.md +138 -0
- data/Pro-3.0-Upgrade.md +44 -0
- data/Pro-Changes.md +539 -0
- data/README.md +8 -0
- data/Rakefile +9 -0
- data/bin/sidekiq +18 -0
- data/bin/sidekiqctl +99 -0
- data/bin/sidekiqload +167 -0
- data/code_of_conduct.md +50 -0
- data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
- data/lib/generators/sidekiq/worker_generator.rb +49 -0
- data/lib/sidekiq.rb +237 -0
- data/lib/sidekiq/api.rb +844 -0
- data/lib/sidekiq/cli.rb +389 -0
- data/lib/sidekiq/client.rb +260 -0
- data/lib/sidekiq/core_ext.rb +106 -0
- data/lib/sidekiq/exception_handler.rb +31 -0
- data/lib/sidekiq/extensions/action_mailer.rb +57 -0
- data/lib/sidekiq/extensions/active_record.rb +40 -0
- data/lib/sidekiq/extensions/class_methods.rb +40 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +25 -0
- data/lib/sidekiq/fetch.rb +81 -0
- data/lib/sidekiq/launcher.rb +160 -0
- data/lib/sidekiq/logging.rb +106 -0
- data/lib/sidekiq/manager.rb +137 -0
- data/lib/sidekiq/middleware/chain.rb +150 -0
- data/lib/sidekiq/middleware/i18n.rb +42 -0
- data/lib/sidekiq/middleware/server/active_record.rb +13 -0
- data/lib/sidekiq/middleware/server/logging.rb +40 -0
- data/lib/sidekiq/middleware/server/retry_jobs.rb +205 -0
- data/lib/sidekiq/paginator.rb +43 -0
- data/lib/sidekiq/processor.rb +186 -0
- data/lib/sidekiq/rails.rb +39 -0
- data/lib/sidekiq/redis_connection.rb +97 -0
- data/lib/sidekiq/scheduled.rb +146 -0
- data/lib/sidekiq/testing.rb +316 -0
- data/lib/sidekiq/testing/inline.rb +29 -0
- data/lib/sidekiq/util.rb +62 -0
- data/lib/sidekiq/version.rb +4 -0
- data/lib/sidekiq/web.rb +278 -0
- data/lib/sidekiq/web_helpers.rb +255 -0
- data/lib/sidekiq/worker.rb +121 -0
- data/sidekiq.gemspec +26 -0
- data/sr-sidekiq-4.1.3.gem +0 -0
- data/sr-sidekiq-4.1.4.gem +0 -0
- data/sr-sidekiq-4.1.5.gem +0 -0
- data/test/config.yml +9 -0
- data/test/env_based_config.yml +11 -0
- data/test/fake_env.rb +1 -0
- data/test/fixtures/en.yml +2 -0
- data/test/helper.rb +75 -0
- data/test/test_actors.rb +138 -0
- data/test/test_api.rb +528 -0
- data/test/test_cli.rb +406 -0
- data/test/test_client.rb +262 -0
- data/test/test_exception_handler.rb +56 -0
- data/test/test_extensions.rb +127 -0
- data/test/test_fetch.rb +50 -0
- data/test/test_launcher.rb +85 -0
- data/test/test_logging.rb +35 -0
- data/test/test_manager.rb +50 -0
- data/test/test_middleware.rb +158 -0
- data/test/test_processor.rb +201 -0
- data/test/test_rails.rb +22 -0
- data/test/test_redis_connection.rb +127 -0
- data/test/test_retry.rb +326 -0
- data/test/test_retry_exhausted.rb +149 -0
- data/test/test_scheduled.rb +115 -0
- data/test/test_scheduling.rb +50 -0
- data/test/test_sidekiq.rb +107 -0
- data/test/test_testing.rb +143 -0
- data/test/test_testing_fake.rb +357 -0
- data/test/test_testing_inline.rb +94 -0
- data/test/test_util.rb +13 -0
- data/test/test_web.rb +614 -0
- data/test/test_web_helpers.rb +54 -0
- data/web/assets/images/bootstrap/glyphicons-halflings-white.png +0 -0
- data/web/assets/images/bootstrap/glyphicons-halflings.png +0 -0
- data/web/assets/images/favicon.ico +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status-sd8051fd480.png +0 -0
- data/web/assets/images/status/active.png +0 -0
- data/web/assets/images/status/idle.png +0 -0
- data/web/assets/javascripts/application.js +88 -0
- data/web/assets/javascripts/dashboard.js +300 -0
- data/web/assets/javascripts/locales/README.md +27 -0
- data/web/assets/javascripts/locales/jquery.timeago.ar.js +96 -0
- data/web/assets/javascripts/locales/jquery.timeago.bg.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.bs.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.ca.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.cs.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.cy.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.da.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.de.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.el.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.en-short.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.en.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.es.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.et.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.fa.js +22 -0
- data/web/assets/javascripts/locales/jquery.timeago.fi.js +28 -0
- data/web/assets/javascripts/locales/jquery.timeago.fr-short.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.fr.js +17 -0
- data/web/assets/javascripts/locales/jquery.timeago.he.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.hr.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.hu.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.hy.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.id.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.it.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.ja.js +19 -0
- data/web/assets/javascripts/locales/jquery.timeago.ko.js +17 -0
- data/web/assets/javascripts/locales/jquery.timeago.lt.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.mk.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.nl.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.no.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.pl.js +31 -0
- data/web/assets/javascripts/locales/jquery.timeago.pt-br.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.pt.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.ro.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.rs.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.ru.js +34 -0
- data/web/assets/javascripts/locales/jquery.timeago.sk.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.sl.js +44 -0
- data/web/assets/javascripts/locales/jquery.timeago.sv.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.th.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.tr.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.uk.js +34 -0
- data/web/assets/javascripts/locales/jquery.timeago.uz.js +19 -0
- data/web/assets/javascripts/locales/jquery.timeago.zh-cn.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.zh-tw.js +20 -0
- data/web/assets/stylesheets/application.css +754 -0
- data/web/assets/stylesheets/bootstrap.css +9 -0
- data/web/locales/cs.yml +78 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +69 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +79 -0
- data/web/locales/es.yml +69 -0
- data/web/locales/fr.yml +78 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +78 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/nb.yml +77 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +78 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/uk.yml +76 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +17 -0
- data/web/views/_job_info.erb +88 -0
- data/web/views/_nav.erb +66 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_js.erb +5 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +94 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +32 -0
- data/web/views/morgue.erb +71 -0
- data/web/views/queue.erb +45 -0
- data/web/views/queues.erb +28 -0
- data/web/views/retries.erb +74 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +54 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +408 -0
@@ -0,0 +1,40 @@
|
|
1
|
+
module Sidekiq
  module Middleware
    module Server
      # Server middleware that logs the start, completion, and failure of
      # every job. Each line is tagged (via Sidekiq::Logging.with_context)
      # with the job class and JID so concurrent jobs can be told apart.
      class Logging

        # Wraps job execution (the yield) with "start"/"done"/"fail" log lines.
        #
        # worker - the worker instance about to run
        # item   - the job payload hash ('jid', optional 'wrapped'/'bid')
        # queue  - the queue name (unused here, part of the middleware contract)
        #
        # Timing uses the monotonic clock so the reported duration cannot be
        # skewed by wall-clock adjustments (NTP slew, manual clock changes).
        def call(worker, item, queue)
          Sidekiq::Logging.with_context(log_context(worker, item)) do
            begin
              start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
              logger.info("start".freeze)
              yield
              logger.info("done: #{elapsed(start)} sec")
            rescue Exception
              # Log the failure timing, then always re-raise so the retry
              # middleware and the processor see the original exception.
              logger.info("fail: #{elapsed(start)} sec")
              raise
            end
          end
        end

        private

        # If we're using a wrapper class, like ActiveJob, use the "wrapped"
        # attribute to expose the underlying thing.
        def log_context(worker, item)
          klass = item['wrapped'.freeze] || worker.class.to_s
          "#{klass} JID-#{item['jid'.freeze]}#{" BID-#{item['bid'.freeze]}" if item['bid'.freeze]}"
        end

        # Seconds elapsed since +start+ (a monotonic timestamp), rounded to
        # millisecond precision.
        def elapsed(start)
          (Process.clock_gettime(Process::CLOCK_MONOTONIC) - start).round(3)
        end

        def logger
          Sidekiq.logger
        end
      end
    end
  end
end
|
40
|
+
|
@@ -0,0 +1,205 @@
|
|
1
|
+
require 'sidekiq/scheduled'
require 'sidekiq/api'

module Sidekiq
  module Middleware
    module Server
      ##
      # Automatically retry jobs that fail in Sidekiq.
      # Sidekiq's retry support assumes a typical development lifecycle:
      #
      #   0. Push some code changes with a bug in it.
      #   1. Bug causes job processing to fail, Sidekiq's middleware captures
      #      the job and pushes it onto a retry queue.
      #   2. Sidekiq retries jobs in the retry queue multiple times with
      #      an exponential delay, the job continues to fail.
      #   3. After a few days, a developer deploys a fix. The job is
      #      reprocessed successfully.
      #   4. Once retries are exhausted, Sidekiq will give up and move the
      #      job to the Dead Job Queue (aka morgue) where it must be dealt with
      #      manually in the Web UI.
      #   5. After 6 months on the DJQ, Sidekiq will discard the job.
      #
      # A job looks like:
      #
      #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
      #
      # The 'retry' option also accepts a number (in place of 'true'):
      #
      #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
      #
      # The job will be retried this number of times before giving up. (If simply
      # 'true', Sidekiq retries 25 times)
      #
      # We'll add a bit more data to the job to support retries:
      #
      # * 'queue' - the queue to use
      # * 'retry_count' - number of times we've retried so far.
      # * 'error_message' - the message from the exception
      # * 'error_class' - the exception class
      # * 'failed_at' - the first time it failed
      # * 'retried_at' - the last time it was retried
      # * 'backtrace' - the number of lines of error backtrace to store
      #
      # We don't store the backtrace by default as that can add a lot of overhead
      # to the job and everyone is using an error service, right?
      #
      # The default number of retry attempts is 25 which works out to about 3 weeks
      # of retries. You can pass a value for the max number of retry attempts when
      # adding the middleware using the options hash:
      #
      #   Sidekiq.configure_server do |config|
      #     config.server_middleware do |chain|
      #       chain.add Sidekiq::Middleware::Server::RetryJobs, :max_retries => 7
      #     end
      #   end
      #
      # or limit the number of retries for a particular worker with:
      #
      #    class MyWorker
      #      include Sidekiq::Worker
      #      sidekiq_options :retry => 10
      #    end
      #
      class RetryJobs
        include Sidekiq::Util

        DEFAULT_MAX_RETRY_ATTEMPTS = 25

        # options - :max_retries overrides the default of 25 attempts.
        def initialize(options = {})
          @max_retries = options.fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
        end

        def call(worker, msg, queue)
          yield
        rescue Sidekiq::Shutdown
          # ignore, will be pushed back onto queue during hard_shutdown
          raise
        rescue Exception => e
          # A Shutdown may be wrapped as the +cause+ of another exception
          # (Ruby 2.1+); treat that as a shutdown too so the job is requeued
          # rather than counted as a retry.
          raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

          raise e unless msg['retry']
          attempt_retry(worker, msg, queue, e)
        end

        private

        # Records failure metadata on the job hash, then either schedules it
        # in the 'retry' sorted set (score = epoch time of the next attempt)
        # or hands it to retries_exhausted. Always re-raises the exception.
        def attempt_retry(worker, msg, queue, exception)
          max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)

          # Retries may be routed to a dedicated queue via 'retry_queue'.
          msg['queue'] = msg['retry_queue'] || queue

          # App code can stuff all sorts of crazy binary data into the error message
          # that won't convert to JSON.
          m = exception.message.to_s[0, 10_000]
          if m.respond_to?(:scrub!)
            m.force_encoding("utf-8")
            m.scrub!
          end

          msg['error_message'] = m
          msg['error_class'] = exception.class.name
          count = if msg['retry_count']
                    msg['retried_at'] = Time.now.to_f
                    msg['retry_count'] += 1
                  else
                    msg['failed_at'] = Time.now.to_f
                    msg['retry_count'] = 0
                  end

          # 'backtrace' may be true (store it all), false/nil (store none),
          # or an integer (store that many lines).
          if msg['backtrace'] == true
            msg['error_backtrace'] = exception.backtrace
          elsif !msg['backtrace']
            # do nothing
          elsif msg['backtrace'].to_i != 0
            msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
          end

          if count < max_retry_attempts
            delay = delay_for(worker, count, exception)
            logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
            retry_at = Time.now.to_f + delay
            payload = Sidekiq.dump_json(msg)
            Sidekiq.redis do |conn|
              conn.zadd('retry', retry_at.to_s, payload)
            end
          else
            # Goodbye dear message, you (re)tried your best I'm sure.
            retries_exhausted(worker, msg, exception)
          end

          raise exception
        end

        # Runs the worker's sidekiq_retries_exhausted block (or the global
        # default), then moves the job to the morgue unless 'dead' => false.
        def retries_exhausted(worker, msg, exception)
          logger.debug { "Retries exhausted for job" }
          begin
            block = worker.sidekiq_retries_exhausted_block || Sidekiq.default_retries_exhausted
            block.call(msg, exception) if block
          rescue => e
            # A buggy exhausted-callback must not prevent the job from being
            # moved to the morgue.
            handle_exception(e, { context: "Error calling retries_exhausted for #{worker.class}", job: msg })
          end

          send_to_morgue(msg) unless msg['dead'] == false
        end

        # Adds the job to the 'dead' sorted set and prunes entries that are
        # too old or beyond the maximum morgue size.
        def send_to_morgue(msg)
          Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
          payload = Sidekiq.dump_json(msg)
          now = Time.now.to_f
          Sidekiq.redis do |conn|
            conn.multi do
              conn.zadd('dead', now, payload)
              conn.zremrangebyscore('dead', '-inf', now - DeadSet.timeout)
              conn.zremrangebyrank('dead', 0, -DeadSet.max_jobs)
            end
          end
        end

        # 'retry' => <integer> overrides the middleware-wide maximum; any
        # other truthy value (usually +true+) falls back to the default.
        # NOTE: uses Integer, not Fixnum — Fixnum was deprecated in Ruby 2.4
        # and removed in 3.2; Integer matches the same values on older rubies.
        def retry_attempts_from(msg_retry, default)
          if msg_retry.is_a?(Integer)
            msg_retry
          else
            default
          end
        end

        # Worker-defined sidekiq_retry_in wins; falls back to the default
        # exponential formula if absent or if the block returns nil/raises.
        def delay_for(worker, count, exception)
          worker.sidekiq_retry_in_block? && retry_in(worker, count, exception) || seconds_to_delay(count)
        end

        # delayed_job uses the same basic formula
        def seconds_to_delay(count)
          (count ** 4) + 15 + (rand(30)*(count+1))
        end

        # Invokes the worker's custom retry-delay block; returns nil on error
        # so delay_for falls back to seconds_to_delay.
        def retry_in(worker, count, exception)
          begin
            worker.sidekiq_retry_in_block.call(count, exception).to_i
          rescue Exception => e
            handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
            nil
          end
        end

        # Walks the exception +cause+ chain looking for Sidekiq::Shutdown.
        # checked_causes guards against circular cause references.
        def exception_caused_by_shutdown?(e, checked_causes = [])
          # In Ruby 2.1.0 only, check if exception is a result of shutdown.
          return false unless defined?(e.cause)

          # Handle circular causes
          checked_causes << e.object_id
          return false if checked_causes.include?(e.cause.object_id)

          e.cause.instance_of?(Sidekiq::Shutdown) ||
            exception_caused_by_shutdown?(e.cause, checked_causes)
        end

      end
    end
  end
end
|
@@ -0,0 +1,43 @@
|
|
1
|
+
# frozen_string_literal: true
module Sidekiq
  # Pagination helper shared by the Web UI: fetches one page of entries
  # from a Redis sorted set or list, together with the total size.
  module Paginator

    # Returns [current_page, total_size, entries] for the Redis key.
    #
    # key       - Redis key holding a zset or list
    # pageidx   - 1-based page number requested (values < 1 clamp to 1)
    # page_size - entries per page
    # opts      - optional hash; :reverse pages a zset from the high end
    def page(key, pageidx=1, page_size=25, opts=nil)
      requested = pageidx.to_i
      current_page = requested < 1 ? 1 : requested
      first = (current_page - 1) * page_size
      last = first + page_size - 1

      Sidekiq.redis do |conn|
        kind = conn.type(key)

        case kind
        when 'zset'
          reversed = opts && opts[:reverse]
          # MULTI pipelines the size query and the range fetch atomically;
          # the block's replies come back as [zcard, zrange] in order.
          count, entries = conn.multi do
            conn.zcard(key)
            if reversed
              conn.zrevrange(key, first, last, :with_scores => true)
            else
              conn.zrange(key, first, last, :with_scores => true)
            end
          end
          [current_page, count, entries]
        when 'list'
          count, entries = conn.multi do
            conn.llen(key)
            conn.lrange(key, first, last)
          end
          [current_page, count, entries]
        when 'none'
          # Missing key: an empty first page.
          [1, 0, []]
        else
          raise "can't page a #{kind}"
        end
      end
    end

  end
end
|
@@ -0,0 +1,186 @@
|
|
1
|
+
# frozen_string_literal: true
require 'sidekiq/util'
require 'sidekiq/fetch'
require 'thread'
require 'concurrent/map'
require 'concurrent/atomic/atomic_fixnum'

module Sidekiq
  ##
  # The Processor is a standalone thread which:
  #
  # 1. fetches a job from Redis
  # 2. executes the job
  #   a. instantiate the Worker
  #   b. run the middleware chain
  #   c. call #perform
  #
  # A Processor can exit due to shutdown (processor_stopped)
  # or due to an error during job execution (processor_died)
  #
  # If an error occurs in the job execution, the
  # Processor calls the Manager to create a new one
  # to replace itself and exits.
  #
  class Processor

    include Util

    attr_reader :thread
    attr_reader :job

    # mgr - the Manager that owns this processor; its options hash supplies
    # the fetch strategy (:fetch, defaulting to Sidekiq::BasicFetch).
    def initialize(mgr)
      @mgr = mgr
      @down = false      # timestamp of the first fetch failure, nil/false when Redis is reachable
      @done = false      # set true by terminate/kill; the run loop checks it between jobs
      @job = nil
      @thread = nil
      @strategy = (mgr.options[:fetch] || Sidekiq::BasicFetch).new(mgr.options)
    end

    # Graceful stop: lets the current job finish; the run loop exits before
    # fetching another. Blocks until the thread exits when wait is true.
    def terminate(wait=false)
      @done = true
      return if !@thread
      @thread.value if wait
    end

    # Forceful stop: raises Sidekiq::Shutdown inside the worker thread to
    # abort the in-progress job.
    def kill(wait=false)
      @done = true
      return if !@thread
      # unlike the other actors, terminate does not wait
      # for the thread to finish because we don't know how
      # long the job will take to finish. Instead we
      # provide a `kill` method to call after the shutdown
      # timeout passes.
      @thread.raise ::Sidekiq::Shutdown
      @thread.value if wait
    end

    # Spawns the worker thread (idempotent; ||= keeps the first thread).
    def start
      @thread ||= safe_thread("processor", &method(:run))
    end

    private unless $TESTING

    # Main loop: process jobs until @done, then notify the manager. Any
    # uncaught exception reports processor_died so the manager can replace
    # this processor.
    def run
      begin
        while !@done
          process_one
        end
        @mgr.processor_stopped(self)
      rescue Sidekiq::Shutdown
        @mgr.processor_stopped(self)
      rescue Exception => ex
        @mgr.processor_died(self, ex)
      end
    end

    # Fetch one unit of work and process it; @job is exposed (attr_reader)
    # while the job runs and cleared afterwards.
    def process_one
      @job = fetch
      process(@job) if @job
      @job = nil
    end

    # Pulls one unit of work from the fetch strategy. Returns nil on
    # shutdown or on fetch error (handle_fetch_exception sleeps and
    # returns nil). Logs a recovery line after a Redis outage.
    def get_one
      begin
        work = @strategy.retrieve_work
        (logger.info { "Redis is online, #{Time.now - @down} sec downtime" }; @down = nil) if @down
        work
      rescue Sidekiq::Shutdown
      rescue => ex
        handle_fetch_exception(ex)
      end
    end

    # Like get_one, but if shutdown began while the fetch was in flight,
    # the job is pushed back to Redis instead of being processed.
    def fetch
      j = get_one
      if j && @done
        j.requeue
        nil
      else
        j
      end
    end

    # Logs the first failure of an outage (with backtrace), then throttles
    # the fetch loop with a 1s sleep. Returns nil so get_one yields no work.
    def handle_fetch_exception(ex)
      if !@down
        @down = Time.now
        logger.error("Error fetching job: #{ex}")
        ex.backtrace.each do |bt|
          logger.error(bt)
        end
      end
      sleep(1)
      nil
    end

    # Decodes and executes one unit of work. The job is acknowledged
    # (removed from Redis) in the ensure block only if `ack` is true.
    def process(work)
      jobstr = work.job
      queue = work.queue_name

      ack = false
      begin
        job = Sidekiq.load_json(jobstr)
        klass = job['class'.freeze].constantize
        worker = klass.new
        worker.jid = job['jid'.freeze]

        stats(worker, job, queue) do
          Sidekiq.server_middleware.invoke(worker, job, queue) do
            # Only ack if we either attempted to start this job or
            # successfully completed it. This prevents us from
            # losing jobs if a middleware raises an exception before yielding
            ack = true
            execute_job(worker, cloned(job['args'.freeze]))
          end
        end
        ack = true
      rescue Sidekiq::Shutdown
        # Had to force kill this job because it didn't finish
        # within the timeout. Don't acknowledge the work since
        # we didn't properly finish it.
        ack = false
      rescue Exception => ex
        handle_exception(ex, job || { :job => jobstr })
        raise
      ensure
        work.acknowledge if ack
      end
    end

    # Separated out so tests and subclasses can override invocation.
    def execute_job(worker, cloned_args)
      worker.perform(*cloned_args)
    end

    # Compact, memoized per-thread identity used as the WORKER_STATE key.
    def thread_identity
      @str ||= Thread.current.object_id.to_s(36)
    end

    # Process-wide state shared by all processors: currently-running jobs
    # plus processed/failed counters (reported by the heartbeat/Web UI).
    WORKER_STATE = Concurrent::Map.new
    PROCESSED = Concurrent::AtomicFixnum.new
    FAILURE = Concurrent::AtomicFixnum.new

    # Tracks the running job in WORKER_STATE for the duration of the yield
    # and bumps the failure/processed counters (both count every attempt).
    def stats(worker, job, queue)
      tid = thread_identity
      WORKER_STATE[tid] = {:queue => queue, :payload => job, :run_at => Time.now.to_i }

      begin
        yield
      rescue Exception
        FAILURE.increment
        raise
      ensure
        WORKER_STATE.delete(tid)
        PROCESSED.increment
      end
    end

    # Deep clone the arguments passed to the worker so that if
    # the job fails, what is pushed back onto Redis hasn't
    # been mutated by the worker.
    def cloned(ary)
      Marshal.load(Marshal.dump(ary))
    end

  end
end
|