sidekiq 6.4.0 → 6.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- checksums.yaml +4 -4
- data/Changes.md +50 -1
- data/README.md +6 -1
- data/bin/sidekiq +3 -3
- data/bin/sidekiqload +70 -66
- data/bin/sidekiqmon +1 -1
- data/lib/sidekiq/api.rb +78 -65
- data/lib/sidekiq/cli.rb +46 -37
- data/lib/sidekiq/client.rb +42 -28
- data/lib/sidekiq/component.rb +64 -0
- data/lib/sidekiq/delay.rb +1 -1
- data/lib/sidekiq/extensions/action_mailer.rb +2 -2
- data/lib/sidekiq/extensions/active_record.rb +2 -2
- data/lib/sidekiq/extensions/class_methods.rb +2 -2
- data/lib/sidekiq/extensions/generic_proxy.rb +3 -3
- data/lib/sidekiq/fetch.rb +17 -15
- data/lib/sidekiq/job_logger.rb +15 -27
- data/lib/sidekiq/job_retry.rb +27 -26
- data/lib/sidekiq/job_util.rb +15 -9
- data/lib/sidekiq/launcher.rb +54 -52
- data/lib/sidekiq/logger.rb +8 -18
- data/lib/sidekiq/manager.rb +28 -25
- data/lib/sidekiq/middleware/chain.rb +22 -13
- data/lib/sidekiq/middleware/current_attributes.rb +4 -0
- data/lib/sidekiq/middleware/i18n.rb +6 -4
- data/lib/sidekiq/middleware/modules.rb +19 -0
- data/lib/sidekiq/monitor.rb +1 -1
- data/lib/sidekiq/paginator.rb +8 -8
- data/lib/sidekiq/processor.rb +38 -38
- data/lib/sidekiq/rails.rb +15 -8
- data/lib/sidekiq/redis_client_adapter.rb +154 -0
- data/lib/sidekiq/redis_connection.rb +81 -48
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +11 -10
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +37 -36
- data/lib/sidekiq/transaction_aware_client.rb +45 -0
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/csrf_protection.rb +2 -2
- data/lib/sidekiq/web/helpers.rb +5 -5
- data/lib/sidekiq/web.rb +3 -3
- data/lib/sidekiq/worker.rb +20 -17
- data/lib/sidekiq.rb +98 -30
- data/web/assets/javascripts/application.js +58 -26
- data/web/assets/stylesheets/application.css +1 -0
- data/web/locales/pt-br.yml +27 -9
- data/web/views/_summary.erb +1 -1
- data/web/views/busy.erb +3 -3
- metadata +8 -5
- data/lib/sidekiq/exception_handler.rb +0 -27
- data/lib/sidekiq/util.rb +0 -108
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -25,11 +25,11 @@ module Sidekiq
   #
   # A job looks like:
   #
-  #     { 'class' => '
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
   #
   # The 'retry' option also accepts a number (in place of 'true'):
   #
-  #     { 'class' => '
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
   #
   # The job will be retried this number of times before giving up. (If simply
   # 'true', Sidekiq retries 25 times)
@@ -53,11 +53,11 @@ module Sidekiq
   #
   #     Sidekiq.options[:max_retries] = 7
   #
-  # or limit the number of retries for a particular
+  # or limit the number of retries for a particular job and send retries to
   # a low priority queue with:
   #
-  #     class
-  #       include Sidekiq::
+  #     class MyJob
+  #       include Sidekiq::Job
   #       sidekiq_options retry: 10, retry_queue: 'low'
   #     end
   #
@@ -66,17 +66,18 @@ module Sidekiq

     class Skip < Handled; end

-    include Sidekiq::
+    include Sidekiq::Component

     DEFAULT_MAX_RETRY_ATTEMPTS = 25

-    def initialize(options
-      @
+    def initialize(options)
+      @config = options
+      @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
     end

     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
-    # require the
+    # require the job to be instantiated.
     def global(jobstr, queue)
       yield
     rescue Handled => ex
@@ -103,14 +104,14 @@ module Sidekiq
     end

     # The local retry support means that any errors that occur within
-    # this block can be associated with the given
+    # this block can be associated with the given job instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
     #
     # Note that any exception from the block is wrapped in the Skip
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(
+    def local(jobinst, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -123,11 +124,11 @@ module Sidekiq

       msg = Sidekiq.load_json(jobstr)
       if msg["retry"].nil?
-        msg["retry"] =
+        msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
       end

       raise e unless msg["retry"]
-      attempt_retry(
+      attempt_retry(jobinst, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
       raise Skip
@@ -135,10 +136,10 @@ module Sidekiq

     private

-    # Note that +
-    # instantiate the
+    # Note that +jobinst+ can be nil here if an error is raised before we can
+    # instantiate the job instance. All access must be guarded and
     # best effort.
-    def attempt_retry(
+    def attempt_retry(jobinst, msg, queue, exception)
       max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)

       msg["queue"] = (msg["retry_queue"] || queue)
@@ -170,7 +171,7 @@ module Sidekiq
       end

       if count < max_retry_attempts
-        delay = delay_for(
+        delay = delay_for(jobinst, count, exception)
         # Logging here can break retries if the logging device raises ENOSPC #3979
         # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
@@ -180,13 +181,13 @@ module Sidekiq
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
-        retries_exhausted(
+        retries_exhausted(jobinst, msg, exception)
       end
     end

-    def retries_exhausted(
+    def retries_exhausted(jobinst, msg, exception)
       begin
-        block =
+        block = jobinst&.sidekiq_retries_exhausted_block
         block&.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
@@ -215,19 +216,19 @@ module Sidekiq
       end
     end

-    def delay_for(
+    def delay_for(jobinst, count, exception)
       jitter = rand(10) * (count + 1)
-      if
-        custom_retry_in = retry_in(
+      if jobinst&.sidekiq_retry_in_block
+        custom_retry_in = retry_in(jobinst, count, exception).to_i
         return custom_retry_in + jitter if custom_retry_in > 0
       end
       (count**4) + 15 + jitter
     end

-    def retry_in(
-
+    def retry_in(jobinst, count, exception)
+      jobinst.sidekiq_retry_in_block.call(count, exception)
     rescue Exception => e
-      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
       nil
     end

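The retry hooks this file drives are declared on the job class itself: sidekiq_options retry:/retry_queue: (shown in the comments above), an optional sidekiq_retry_in block that overrides the default (count**4) + 15 + jitter back-off, and an optional sidekiq_retries_exhausted block called once all attempts are used up. A minimal sketch; the class name and block bodies are illustrative, not from this diff:

  class HardJob
    include Sidekiq::Job
    # retry up to 10 times, sending retries to a low-priority queue
    sidekiq_options retry: 10, retry_queue: 'low'

    # override the default back-off; returning nil or 0 falls back to it
    sidekiq_retry_in do |count, exception|
      30 * (count + 1) # seconds
    end

    # invoked by retries_exhausted above when no attempts remain
    sidekiq_retries_exhausted do |msg, exception|
      Sidekiq.logger.warn("Giving up on #{msg["class"]} #{msg["jid"]}: #{exception.message}")
    end

    def perform(*args)
      # ...
    end
  end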
data/lib/sidekiq/job_util.rb
CHANGED
@@ -4,7 +4,8 @@ require "time"
 module Sidekiq
   module JobUtil
     # These functions encapsulate various job utilities.
-
+
+    TRANSIENT_ATTRIBUTES = %w[]

     def validate(item)
       raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
@@ -12,16 +13,19 @@ module Sidekiq
       raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
       raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
       raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+    end

-
+    def verify_json(item)
+      job_class = item["wrapped"] || item["class"]
+      if Sidekiq[:on_complex_arguments] == :raise
        msg = <<~EOM
-          Job arguments to #{
+          Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
          To disable this error, remove `Sidekiq.strict_args!` from your initializer.
        EOM
        raise(ArgumentError, msg) unless json_safe?(item)
-      elsif Sidekiq
+      elsif Sidekiq[:on_complex_arguments] == :warn
        Sidekiq.logger.warn <<~EOM unless json_safe?(item)
-          Job arguments to #{
+          Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
          Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
          by calling `Sidekiq.strict_args!` during Sidekiq initialization.
        EOM
@@ -39,20 +43,22 @@ module Sidekiq

      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""

+      # remove job attributes which aren't necessary to persist into Redis
+      TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
+      item["jid"] ||= SecureRandom.hex(12)
      item["class"] = item["class"].to_s
      item["queue"] = item["queue"].to_s
-      item["jid"] ||= SecureRandom.hex(12)
      item["created_at"] ||= Time.now.to_f
-
      item
    end

    def normalized_hash(item_class)
      if item_class.is_a?(Class)
-        raise(ArgumentError, "Message must include a Sidekiq::
+        raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
        item_class.get_sidekiq_options
      else
-        Sidekiq.
+        Sidekiq.default_job_options
      end
    end

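The new verify_json helper keys off Sidekiq[:on_complex_arguments], which the messages above tie to Sidekiq.strict_args!: :raise rejects job arguments that are not native JSON types, :warn only logs until Sidekiq 7.0 makes it an error. A hedged sketch of how that plays out from an initializer; the job class and arguments are illustrative:

  # config/initializers/sidekiq.rb
  Sidekiq.strict_args!   # complex arguments now raise instead of silently serializing

  SomeJob.perform_async(123, "ok", ["native", "json", "types"])  # accepted
  SomeJob.perform_async(Time.now)  # Time is not a native JSON type: rejected with ArgumentError
  # with Sidekiq[:on_complex_arguments] = :warn it would only log a warning instead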
data/lib/sidekiq/launcher.rb
CHANGED
@@ -3,11 +3,12 @@
 require "sidekiq/manager"
 require "sidekiq/fetch"
 require "sidekiq/scheduled"
+require "sidekiq/ring_buffer"

 module Sidekiq
   # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
   class Launcher
-    include
+    include Sidekiq::Component

     STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years

@@ -15,18 +16,18 @@ module Sidekiq
       proc { "sidekiq" },
       proc { Sidekiq::VERSION },
       proc { |me, data| data["tag"] },
-      proc { |me, data| "[#{Processor::
+      proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
       proc { |me, data| "stopping" if me.stopping? }
     ]

     attr_accessor :manager, :poller, :fetcher

     def initialize(options)
+      @config = options
       options[:fetch] ||= BasicFetch.new(options)
       @manager = Sidekiq::Manager.new(options)
-      @poller = Sidekiq::Scheduled::Poller.new
+      @poller = Sidekiq::Scheduled::Poller.new(options)
       @done = false
-      @options = options
     end

     def run
@@ -43,11 +44,9 @@ module Sidekiq
       @poller.terminate
     end

-    # Shuts down
-    # return until all work is complete and cleaned up.
-    # It can take up to the timeout to complete.
+    # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
     def stop
-      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]

       @done = true
       @manager.quiet
@@ -55,10 +54,10 @@ module Sidekiq

       @manager.stop(deadline)

-      # Requeue everything in case there was a
+      # Requeue everything in case there was a thread which fetched a job while the process was stopped.
       # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-      strategy = @
-      strategy.bulk_requeue([], @
+      strategy = @config[:fetch]
+      strategy.bulk_requeue([], @config)

       clear_heartbeat
     end
@@ -76,17 +75,17 @@ module Sidekiq
         heartbeat
         sleep BEAT_PAUSE
       end
-
+      logger.info("Heartbeat stopping...")
     end

     def clear_heartbeat
       # Remove record from Redis since we are shutting down.
       # Note we don't stop the heartbeat thread; if the process
       # doesn't actually exit, it'll reappear in the Web UI.
-
-      conn.pipelined do
-
-
+      redis do |conn|
+        conn.pipelined do |pipeline|
+          pipeline.srem("processes", identity)
+          pipeline.unlink("#{identity}:work")
         end
       end
     rescue
@@ -107,14 +106,14 @@ module Sidekiq
       nowdate = Time.now.utc.strftime("%Y-%m-%d")
       begin
         Sidekiq.redis do |conn|
-          conn.pipelined do
-
-
-
-
-
-
-
+          conn.pipelined do |pipeline|
+            pipeline.incrby("stat:processed", procd)
+            pipeline.incrby("stat:processed:#{nowdate}", procd)
+            pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+            pipeline.incrby("stat:failed", fails)
+            pipeline.incrby("stat:failed:#{nowdate}", fails)
+            pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
           end
         end
       rescue => ex
@@ -132,26 +131,29 @@ module Sidekiq
       begin
         fails = Processor::FAILURE.reset
         procd = Processor::PROCESSED.reset
-        curstate = Processor::
+        curstate = Processor::WORK_STATE.dup

-        workers_key = "#{key}:workers"
         nowdate = Time.now.utc.strftime("%Y-%m-%d")

-
-          conn.multi do
-
-
-
+        redis do |conn|
+          conn.multi do |transaction|
+            transaction.incrby("stat:processed", procd)
+            transaction.incrby("stat:processed:#{nowdate}", procd)
+            transaction.expire("stat:processed:#{nowdate}", STATS_TTL)

-
-
-
+            transaction.incrby("stat:failed", fails)
+            transaction.incrby("stat:failed:#{nowdate}", fails)
+            transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
+          end

-
+          # work is the current set of executing jobs
+          work_key = "#{key}:work"
+          conn.pipelined do |transaction|
+            transaction.unlink(work_key)
             curstate.each_pair do |tid, hash|
-
+              transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
             end
-
+            transaction.expire(work_key, 60)
           end
         end

@@ -160,18 +162,18 @@ module Sidekiq
        fails = procd = 0
        kb = memory_usage(::Process.pid)

-        _, exists, _, _, msg =
-          conn.multi {
-
-
-
+        _, exists, _, _, msg = redis { |conn|
+          conn.multi { |transaction|
+            transaction.sadd("processes", key)
+            transaction.exists?(key)
+            transaction.hmset(key, "info", to_json,
              "busy", curstate.size,
              "beat", Time.now.to_f,
              "rtt_us", rtt,
-              "quiet", @done,
+              "quiet", @done.to_s,
              "rss", kb)
-
-
+            transaction.expire(key, 60)
+            transaction.rpop("#{key}-signals")
          }
        }

@@ -198,7 +200,7 @@ module Sidekiq

    def check_rtt
      a = b = 0
-
+      redis do |x|
        a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
        x.ping
        b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
@@ -209,12 +211,12 @@ module Sidekiq
      # Workable is < 10,000µs
      # Log a warning if it's a disaster.
      if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
-
+        logger.warn <<~EOM
          Your Redis network connection is performing extremely poorly.
          Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
          Ensure Redis is running in the same AZ or datacenter as Sidekiq.
          If these values are close to 100,000, that means your Sidekiq process may be
-          CPU
+          CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
        EOM
        RTT_READINGS.reset
      end
@@ -246,10 +248,10 @@ module Sidekiq
        "hostname" => hostname,
        "started_at" => Time.now.to_f,
        "pid" => ::Process.pid,
-        "tag" => @
-        "concurrency" => @
-        "queues" => @
-        "labels" => @
+        "tag" => @config[:tag] || "",
+        "concurrency" => @config[:concurrency],
+        "queues" => @config[:queues].uniq,
+        "labels" => @config[:labels],
        "identity" => identity
      }
    end
data/lib/sidekiq/logger.rb
CHANGED
@@ -16,6 +16,10 @@ module Sidekiq
     def self.current
       Thread.current[:sidekiq_context] ||= {}
     end
+
+    def self.add(k, v)
+      current[k] = v
+    end
   end

   module LoggingUtils
@@ -31,24 +35,10 @@ module Sidekiq
       nil
     end

-
-      level
-
-
-    def info?
-      level <= 1
-    end
-
-    def warn?
-      level <= 2
-    end
-
-    def error?
-      level <= 3
-    end
-
-    def fatal?
-      level <= 4
+    LEVELS.each do |level, numeric_level|
+      define_method("#{level}?") do
+        local_level.nil? ? super() : local_level <= numeric_level
+      end
     end

     def local_level
data/lib/sidekiq/manager.rb
CHANGED
@@ -1,6 +1,5 @@
 # frozen_string_literal: true

-require "sidekiq/util"
 require "sidekiq/processor"
 require "sidekiq/fetch"
 require "set"
@@ -21,37 +20,34 @@ module Sidekiq
   # the shutdown process. The other tasks are performed by other threads.
   #
   class Manager
-    include
+    include Sidekiq::Component

     attr_reader :workers
-    attr_reader :options

     def initialize(options = {})
+      @config = options
       logger.debug { options.inspect }
-      @options = options
       @count = options[:concurrency] || 10
       raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1

       @done = false
       @workers = Set.new
       @count.times do
-        @workers << Processor.new(
+        @workers << Processor.new(@config, &method(:processor_result))
       end
       @plock = Mutex.new
     end

     def start
-      @workers.each
-        x.start
-      end
+      @workers.each(&:start)
     end

     def quiet
       return if @done
       @done = true

-      logger.info { "Terminating quiet
-      @workers.each
+      logger.info { "Terminating quiet threads" }
+      @workers.each(&:terminate)
       fire_event(:quiet, reverse: true)
     end

@@ -65,24 +61,18 @@ module Sidekiq
       sleep PAUSE_TIME
       return if @workers.empty?

-      logger.info { "Pausing to allow
+      logger.info { "Pausing to allow jobs to finish..." }
       wait_for(deadline) { @workers.empty? }
       return if @workers.empty?

       hard_shutdown
     end

-    def
-      @plock.synchronize do
-        @workers.delete(processor)
-      end
-    end
-
-    def processor_died(processor, reason)
+    def processor_result(processor, reason = nil)
       @plock.synchronize do
         @workers.delete(processor)
         unless @done
-          p = Processor.new(
+          p = Processor.new(@config, &method(:processor_result))
           @workers << p
           p.start
         end
@@ -96,7 +86,7 @@ module Sidekiq
     private

     def hard_shutdown
-      # We've reached the timeout and we still have busy
+      # We've reached the timeout and we still have busy threads.
       # They must die but their jobs shall live on.
       cleanup = nil
       @plock.synchronize do
@@ -106,17 +96,17 @@ module Sidekiq
       if cleanup.size > 0
         jobs = cleanup.map { |p| p.job }.compact

-        logger.warn { "Terminating #{cleanup.size} busy
-        logger.
+        logger.warn { "Terminating #{cleanup.size} busy threads" }
+        logger.debug { "Jobs still in progress #{jobs.inspect}" }

         # Re-enqueue unfinished jobs
         # NOTE: You may notice that we may push a job back to redis before
-        # the
+        # the thread is terminated. This is ok because Sidekiq's
         # contract says that jobs are run AT LEAST once. Process termination
         # is delayed until we're certain the jobs are back in Redis because
         # it is worse to lose a job than to run it twice.
-        strategy = @
-        strategy.bulk_requeue(jobs, @
+        strategy = @config[:fetch]
+        strategy.bulk_requeue(jobs, @config)
       end

       cleanup.each do |processor|
@@ -129,5 +119,18 @@ module Sidekiq
       deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
       wait_for(deadline) { @workers.empty? }
     end
+
+    # hack for quicker development / testing environment #2774
+    PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
+
+    # Wait for the orblock to be true or the deadline passed.
+    def wait_for(deadline, &condblock)
+      remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      while remaining > PAUSE_TIME
+        return if condblock.call
+        sleep PAUSE_TIME
+        remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      end
+    end
   end
 end
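Two small API notes fall out of this diff: the old per-processor callbacks merge into a single processor_result(processor, reason = nil), and wait_for is now defined directly in Manager (sidekiq/util.rb is deleted in this release); it polls a condition block until a monotonic-clock deadline passes, sleeping PAUSE_TIME between checks. Usage mirrors the calls above:

  deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
  wait_for(deadline) { @workers.empty? }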