sidekiq 6.4.0 → 6.4.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sidekiq might be problematic.
- checksums.yaml +4 -4
- data/Changes.md +30 -1
- data/README.md +6 -1
- data/bin/sidekiq +3 -3
- data/bin/sidekiqload +57 -65
- data/bin/sidekiqmon +1 -1
- data/lib/sidekiq/api.rb +62 -58
- data/lib/sidekiq/cli.rb +14 -6
- data/lib/sidekiq/client.rb +41 -27
- data/lib/sidekiq/delay.rb +1 -1
- data/lib/sidekiq/extensions/action_mailer.rb +2 -2
- data/lib/sidekiq/extensions/active_record.rb +2 -2
- data/lib/sidekiq/extensions/class_methods.rb +2 -2
- data/lib/sidekiq/extensions/generic_proxy.rb +3 -3
- data/lib/sidekiq/fetch.rb +2 -2
- data/lib/sidekiq/job_logger.rb +15 -27
- data/lib/sidekiq/job_retry.rb +23 -23
- data/lib/sidekiq/job_util.rb +8 -6
- data/lib/sidekiq/launcher.rb +37 -36
- data/lib/sidekiq/logger.rb +8 -18
- data/lib/sidekiq/manager.rb +6 -6
- data/lib/sidekiq/middleware/chain.rb +4 -4
- data/lib/sidekiq/middleware/i18n.rb +4 -4
- data/lib/sidekiq/monitor.rb +1 -1
- data/lib/sidekiq/paginator.rb +8 -8
- data/lib/sidekiq/processor.rb +27 -27
- data/lib/sidekiq/rails.rb +10 -3
- data/lib/sidekiq/redis_connection.rb +2 -2
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +36 -35
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/csrf_protection.rb +2 -2
- data/lib/sidekiq/web/helpers.rb +4 -4
- data/lib/sidekiq/web.rb +3 -3
- data/lib/sidekiq/worker.rb +19 -17
- data/lib/sidekiq.rb +22 -15
- data/web/assets/javascripts/application.js +58 -26
- data/web/assets/stylesheets/application.css +1 -0
- data/web/views/_summary.erb +1 -1
- data/web/views/busy.erb +3 -3
- metadata +3 -3
data/lib/sidekiq/client.rb
CHANGED
@@ -15,7 +15,7 @@ module Sidekiq
 # client.middleware do |chain|
 # chain.use MyClientMiddleware
 # end
-# client.push('class' => '
+# client.push('class' => 'SomeJob', 'args' => [1,2,3])
 #
 # All client instances default to the globally-defined
 # Sidekiq.client_middleware but you can change as necessary.
@@ -49,16 +49,16 @@ module Sidekiq
 # The main method used to push a job to Redis. Accepts a number of options:
 #
 # queue - the named queue to use, default 'default'
-# class - the
+# class - the job class to call, required
 # args - an array of simple arguments to the perform method, must be JSON-serializable
 # at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
 # retry - whether to retry this job if it fails, default true or an integer number of retries
 # backtrace - whether to save any error backtrace, default false
 #
 # If class is set to the class name, the jobs' options will be based on Sidekiq's default
-#
+# job options. Otherwise, they will be based on the job class's options.
 #
-# Any options valid for a
+# Any options valid for a job class's sidekiq_options are also available here.
 #
 # All options must be strings, not symbols. NB: because we are serializing to JSON, all
 # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
@@ -67,13 +67,15 @@ module Sidekiq
 # Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
 #
 # Example:
-# push('queue' => 'my_queue', 'class' =>
+# push('queue' => 'my_queue', 'class' => MyJob, 'args' => ['foo', 1, :bat => 'bar'])
 #
 def push(item)
 normed = normalize_item(item)
-payload =
-
+payload = middleware.invoke(normed["class"], normed, normed["queue"], @redis_pool) do
+normed
+end
 if payload
+verify_json(payload)
 raw_push([payload])
 payload["jid"]
 end
@@ -101,12 +103,17 @@ module Sidekiq
 raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
 raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size
 
+jid = items.delete("jid")
+raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1
+
 normed = normalize_item(items)
 payloads = args.map.with_index { |job_args, index|
-copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12)
+copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
 copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
-
-
+result = middleware.invoke(copy["class"], copy, copy["queue"], @redis_pool) do
+verify_json(copy)
+copy
+end
 result || nil
 }.compact
 
@@ -119,8 +126,8 @@ module Sidekiq
 #
 # pool = ConnectionPool.new { Redis.new }
 # Sidekiq::Client.via(pool) do
-#
-#
+# SomeJob.perform_async(1,2,3)
+# SomeOtherJob.perform_async(1,2,3)
 # end
 #
 # Generally this is only needed for very large Sidekiq installs processing
@@ -145,10 +152,10 @@ module Sidekiq
 end
 
 # Resque compatibility helpers. Note all helpers
-# should go through
+# should go through Sidekiq::Job#client_push.
 #
 # Example usage:
-# Sidekiq::Client.enqueue(
+# Sidekiq::Client.enqueue(MyJob, 'foo', 1, :bat => 'bar')
 #
 # Messages are enqueued to the 'default' queue.
 #
@@ -157,14 +164,14 @@ module Sidekiq
 end
 
 # Example usage:
-# Sidekiq::Client.enqueue_to(:queue_name,
+# Sidekiq::Client.enqueue_to(:queue_name, MyJob, 'foo', 1, :bat => 'bar')
 #
 def enqueue_to(queue, klass, *args)
 klass.client_push("queue" => queue, "class" => klass, "args" => args)
 end
 
 # Example usage:
-# Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes,
+# Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyJob, 'foo', 1, :bat => 'bar')
 #
 def enqueue_to_in(queue, interval, klass, *args)
 int = interval.to_f
@@ -178,7 +185,7 @@ module Sidekiq
 end
 
 # Example usage:
-# Sidekiq::Client.enqueue_in(3.minutes,
+# Sidekiq::Client.enqueue_in(3.minutes, MyJob, 'foo', 1, :bat => 'bar')
 #
 def enqueue_in(interval, klass, *args)
 klass.perform_in(interval, *args)
@@ -189,8 +196,23 @@ module Sidekiq
 
 def raw_push(payloads)
 @redis_pool.with do |conn|
-
-
+retryable = true
+begin
+conn.pipelined do |pipeline|
+atomic_push(pipeline, payloads)
+end
+rescue Redis::BaseError => ex
+# 2550 Failover can cause the server to become a replica, need
+# to disconnect and reopen the socket to get back to the primary.
+# 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+# 4985 Use the same logic when a blocking command is force-unblocked
+# The retry logic is copied from sidekiq.rb
+if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+conn.disconnect!
+retryable = false
+retry
+end
+raise
 end
 end
 true
@@ -213,13 +235,5 @@ module Sidekiq
 conn.lpush("queue:#{queue}", to_push)
 end
 end
-
-def process_single(worker_class, item)
-queue = item["queue"]
-
-middleware.invoke(worker_class, item, queue, @redis_pool) do
-item
-end
-end
 end
 end
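The `push`/`push_bulk` changes above inline the client middleware call, run `verify_json` on each payload at push time, and reject an explicit `jid` when bulk-pushing more than one job. A minimal sketch of the resulting behavior, assuming a hypothetical `SampleJob` class (`Sidekiq.strict_args!` and `push_bulk` are existing Sidekiq 6.x APIs):

```ruby
class SampleJob
  include Sidekiq::Worker

  def perform(order_id)
  end
end

Sidekiq.strict_args!             # sets options[:on_complex_arguments] = :raise

SampleJob.perform_async(123)     # fine: Integer round-trips through JSON
SampleJob.perform_async(:rush)   # 6.4.2 raises ArgumentError at push time via verify_json

# Passing a single explicit jid while bulk-pushing several jobs now raises too:
Sidekiq::Client.push_bulk("class" => SampleJob, "args" => [[1], [2]], "jid" => "0123456789ab")
```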
data/lib/sidekiq/delay.rb
CHANGED
@@ -3,7 +3,7 @@
 module Sidekiq
 module Extensions
 def self.enable_delay!
-
+warn "Sidekiq's Delayed Extensions will be removed in Sidekiq 7.0", uplevel: 1
 
 if defined?(::ActiveSupport)
 require "sidekiq/extensions/active_record"
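The only change to `enable_delay!` above is the new deprecation warning. For context, a sketch of the delayed-extension call this switch enables (`UserMailer`, `welcome_email` and `user_id` are illustrative names, not part of the diff):

```ruby
Sidekiq::Extensions.enable_delay!        # 6.4.2 now warns that delay extensions go away in Sidekiq 7.0
UserMailer.delay.welcome_email(user_id)  # still works, routed through the extension proxies further down in this diff
```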
data/lib/sidekiq/extensions/action_mailer.rb
CHANGED
@@ -16,8 +16,8 @@ module Sidekiq
 include Sidekiq::Worker
 
 def perform(yml)
-(target, method_name, args
-msg =
+(target, method_name, args) = YAML.load(yml)
+msg = target.public_send(method_name, *args)
 # The email method can return nil, which causes ActionMailer to return
 # an undeliverable empty message.
 if msg
data/lib/sidekiq/extensions/active_record.rb
CHANGED
@@ -18,8 +18,8 @@ module Sidekiq
 include Sidekiq::Worker
 
 def perform(yml)
-(target, method_name, args
-
+(target, method_name, args) = YAML.load(yml)
+target.__send__(method_name, *args)
 end
 end
 
data/lib/sidekiq/extensions/class_methods.rb
CHANGED
@@ -16,8 +16,8 @@ module Sidekiq
 include Sidekiq::Worker
 
 def perform(yml)
-(target, method_name, args
-
+(target, method_name, args) = YAML.load(yml)
+target.__send__(method_name, *args)
 end
 end
 
data/lib/sidekiq/extensions/generic_proxy.rb
CHANGED
@@ -10,16 +10,16 @@ module Sidekiq
 def initialize(performable, target, options = {})
 @performable = performable
 @target = target
-@opts = options
+@opts = options.transform_keys(&:to_s)
 end
 
-def method_missing(name, *args
+def method_missing(name, *args)
 # Sidekiq has a limitation in that its message must be JSON.
 # JSON can't round trip real Ruby objects so we use YAML to
 # serialize the objects to a String. The YAML will be converted
 # to JSON and then deserialized on the other side back into a
 # Ruby object.
-obj = [@target, name, args
+obj = [@target, name, args]
 marshalled = ::YAML.dump(obj)
 if marshalled.size > SIZE_LIMIT
 ::Sidekiq.logger.warn { "#{@target}.#{name} job argument is #{marshalled.bytesize} bytes, you should refactor it to reduce the size" }
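`Proxy#initialize` above now stringifies the options hash before it is merged into the job payload. A sketch of the practical effect with a hypothetical model class (the assumption being that symbol-keyed delay options are now normalized the same way string keys already were):

```ruby
HardModel.delay(queue: :low, retry: 3).recalculate(42)  # symbol keys now land in the payload as strings
```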
data/lib/sidekiq/fetch.rb
CHANGED
data/lib/sidekiq/job_logger.rb
CHANGED
@@ -12,46 +12,34 @@ module Sidekiq
 
 yield
 
-
-
-end
+Sidekiq::Context.add(:elapsed, elapsed(start))
+@logger.info("done")
 rescue Exception
-
-
-end
+Sidekiq::Context.add(:elapsed, elapsed(start))
+@logger.info("fail")
 
 raise
 end
 
 def prepare(job_hash, &block)
-level = job_hash["log_level"]
-if level
-@logger.log_at(level) do
-Sidekiq::Context.with(job_hash_context(job_hash), &block)
-end
-else
-Sidekiq::Context.with(job_hash_context(job_hash), &block)
-end
-end
-
-def job_hash_context(job_hash)
 # If we're using a wrapper class, like ActiveJob, use the "wrapped"
 # attribute to expose the underlying thing.
 h = {
 class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
 jid: job_hash["jid"]
 }
-h[:bid] = job_hash["bid"] if job_hash
-h[:tags] = job_hash["tags"] if job_hash
-h
-end
-
-def with_elapsed_time_context(start, &block)
-Sidekiq::Context.with(elapsed_time_context(start), &block)
-end
+h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
+h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")
 
-
-
+Thread.current[:sidekiq_context] = h
+level = job_hash["log_level"]
+if level
+@logger.log_at(level, &block)
+else
+yield
+end
+ensure
+Thread.current[:sidekiq_context] = nil
 end
 
 private
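The reworked `prepare` above stores the job context directly in `Thread.current[:sidekiq_context]` and, when the payload carries a `"log_level"`, hands the whole block to `@logger.log_at`. A sketch of the per-job log level option that feeds that key (`VerboseJob` is hypothetical, and the option surfacing as `job_hash["log_level"]` is an assumption based on how sidekiq_options are merged into the payload):

```ruby
class VerboseJob
  include Sidekiq::Worker
  sidekiq_options log_level: :debug   # assumed to surface as job_hash["log_level"]

  def perform(id)
    logger.debug { "debug output enabled only while this job runs (id=#{id})" }
  end
end
```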
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -25,11 +25,11 @@ module Sidekiq
 #
 # A job looks like:
 #
-# { 'class' => '
+# { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
 #
 # The 'retry' option also accepts a number (in place of 'true'):
 #
-# { 'class' => '
+# { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
 #
 # The job will be retried this number of times before giving up. (If simply
 # 'true', Sidekiq retries 25 times)
@@ -53,11 +53,11 @@ module Sidekiq
 #
 # Sidekiq.options[:max_retries] = 7
 #
-# or limit the number of retries for a particular
+# or limit the number of retries for a particular job and send retries to
 # a low priority queue with:
 #
-# class
-# include Sidekiq::
+# class MyJob
+# include Sidekiq::Job
 # sidekiq_options retry: 10, retry_queue: 'low'
 # end
 #
@@ -76,7 +76,7 @@ module Sidekiq
 
 # The global retry handler requires only the barest of data.
 # We want to be able to retry as much as possible so we don't
-# require the
+# require the job to be instantiated.
 def global(jobstr, queue)
 yield
 rescue Handled => ex
@@ -103,14 +103,14 @@ module Sidekiq
 end
 
 # The local retry support means that any errors that occur within
-# this block can be associated with the given
+# this block can be associated with the given job instance.
 # This is required to support the `sidekiq_retries_exhausted` block.
 #
 # Note that any exception from the block is wrapped in the Skip
 # exception so the global block does not reprocess the error. The
 # Skip exception is unwrapped within Sidekiq::Processor#process before
 # calling the handle_exception handlers.
-def local(
+def local(jobinst, jobstr, queue)
 yield
 rescue Handled => ex
 raise ex
@@ -123,11 +123,11 @@ module Sidekiq
 
 msg = Sidekiq.load_json(jobstr)
 if msg["retry"].nil?
-msg["retry"] =
+msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
 end
 
 raise e unless msg["retry"]
-attempt_retry(
+attempt_retry(jobinst, msg, queue, e)
 # We've handled this error associated with this job, don't
 # need to handle it at the global level
 raise Skip
@@ -135,10 +135,10 @@ module Sidekiq
 
 private
 
-# Note that +
-# instantiate the
+# Note that +jobinst+ can be nil here if an error is raised before we can
+# instantiate the job instance. All access must be guarded and
 # best effort.
-def attempt_retry(
+def attempt_retry(jobinst, msg, queue, exception)
 max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
 msg["queue"] = (msg["retry_queue"] || queue)
@@ -170,7 +170,7 @@ module Sidekiq
 end
 
 if count < max_retry_attempts
-delay = delay_for(
+delay = delay_for(jobinst, count, exception)
 # Logging here can break retries if the logging device raises ENOSPC #3979
 # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
 retry_at = Time.now.to_f + delay
@@ -180,13 +180,13 @@ module Sidekiq
 end
 else
 # Goodbye dear message, you (re)tried your best I'm sure.
-retries_exhausted(
+retries_exhausted(jobinst, msg, exception)
 end
 end
 
-def retries_exhausted(
+def retries_exhausted(jobinst, msg, exception)
 begin
-block =
+block = jobinst&.sidekiq_retries_exhausted_block
 block&.call(msg, exception)
 rescue => e
 handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
@@ -215,19 +215,19 @@ module Sidekiq
 end
 end
 
-def delay_for(
+def delay_for(jobinst, count, exception)
 jitter = rand(10) * (count + 1)
-if
-custom_retry_in = retry_in(
+if jobinst&.sidekiq_retry_in_block
+custom_retry_in = retry_in(jobinst, count, exception).to_i
 return custom_retry_in + jitter if custom_retry_in > 0
 end
 (count**4) + 15 + jitter
 end
 
-def retry_in(
-
+def retry_in(jobinst, count, exception)
+jobinst.sidekiq_retry_in_block.call(count, exception)
 rescue Exception => e
-handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{
+handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
 nil
 end
 
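The retry handlers above now receive the job instance as `jobinst` and call its `sidekiq_retry_in_block` and `sidekiq_retries_exhausted_block`. Those blocks come from the existing class-level DSL; a minimal sketch with a hypothetical `FlakyJob`:

```ruby
class FlakyJob
  include Sidekiq::Worker
  sidekiq_options retry: 5, retry_queue: "low"

  # consulted by delay_for/retry_in above; return 0 or nil to fall back to the default backoff
  sidekiq_retry_in { |count, exception| 10 * (count + 1) }

  # called by retries_exhausted above once every attempt has been used
  sidekiq_retries_exhausted do |msg, exception|
    Sidekiq.logger.warn("Giving up on #{msg["class"]} #{msg["jid"]}: #{exception.message}")
  end

  def perform(*)
    raise "flaky"
  end
end
```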
data/lib/sidekiq/job_util.rb
CHANGED
@@ -12,16 +12,19 @@ module Sidekiq
 raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
 raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
 raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+end
 
+def verify_json(item)
+job_class = item["wrapped"] || item["class"]
 if Sidekiq.options[:on_complex_arguments] == :raise
 msg = <<~EOM
-Job arguments to #{
+Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
 To disable this error, remove `Sidekiq.strict_args!` from your initializer.
 EOM
 raise(ArgumentError, msg) unless json_safe?(item)
 elsif Sidekiq.options[:on_complex_arguments] == :warn
 Sidekiq.logger.warn <<~EOM unless json_safe?(item)
-Job arguments to #{
+Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
 Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
 by calling `Sidekiq.strict_args!` during Sidekiq initialization.
 EOM
@@ -39,20 +42,19 @@ module Sidekiq
 
 raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
 
+item["jid"] ||= SecureRandom.hex(12)
 item["class"] = item["class"].to_s
 item["queue"] = item["queue"].to_s
-item["jid"] ||= SecureRandom.hex(12)
 item["created_at"] ||= Time.now.to_f
-
 item
 end
 
 def normalized_hash(item_class)
 if item_class.is_a?(Class)
-raise(ArgumentError, "Message must include a Sidekiq::
+raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
 item_class.get_sidekiq_options
 else
-Sidekiq.
+Sidekiq.default_job_options
 end
 end
 
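`normalize_item` now assigns the `jid` before stringifying class and queue, and `normalized_hash` falls back to `Sidekiq.default_job_options` when `'class'` arrives as a String. For reference (the stock value shown is an assumption based on Sidekiq's documented defaults):

```ruby
Sidekiq.default_job_options   # => {"retry" => true, "queue" => "default"} on an unconfigured install
```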
data/lib/sidekiq/launcher.rb
CHANGED
@@ -15,7 +15,7 @@ module Sidekiq
 proc { "sidekiq" },
 proc { Sidekiq::VERSION },
 proc { |me, data| data["tag"] },
-proc { |me, data| "[#{Processor::
+proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
 proc { |me, data| "stopping" if me.stopping? }
 ]
 
@@ -43,9 +43,7 @@ module Sidekiq
 @poller.terminate
 end
 
-# Shuts down
-# return until all work is complete and cleaned up.
-# It can take up to the timeout to complete.
+# Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
 def stop
 deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
 
@@ -55,7 +53,7 @@ module Sidekiq
 
 @manager.stop(deadline)
 
-# Requeue everything in case there was a
+# Requeue everything in case there was a thread which fetched a job while the process was stopped.
 # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
 strategy = @options[:fetch]
 strategy.bulk_requeue([], @options)
@@ -84,9 +82,9 @@ module Sidekiq
 # Note we don't stop the heartbeat thread; if the process
 # doesn't actually exit, it'll reappear in the Web UI.
 Sidekiq.redis do |conn|
-conn.pipelined do
-
-
+conn.pipelined do |pipeline|
+pipeline.srem("processes", identity)
+pipeline.unlink("#{identity}:work")
 end
 end
 rescue
@@ -107,14 +105,14 @@ module Sidekiq
 nowdate = Time.now.utc.strftime("%Y-%m-%d")
 begin
 Sidekiq.redis do |conn|
-conn.pipelined do
-
-
-
-
-
-
-
+conn.pipelined do |pipeline|
+pipeline.incrby("stat:processed", procd)
+pipeline.incrby("stat:processed:#{nowdate}", procd)
+pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+pipeline.incrby("stat:failed", fails)
+pipeline.incrby("stat:failed:#{nowdate}", fails)
+pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
 end
 end
 rescue => ex
@@ -132,26 +130,29 @@ module Sidekiq
 begin
 fails = Processor::FAILURE.reset
 procd = Processor::PROCESSED.reset
-curstate = Processor::
+curstate = Processor::WORK_STATE.dup
 
-workers_key = "#{key}:workers"
 nowdate = Time.now.utc.strftime("%Y-%m-%d")
 
 Sidekiq.redis do |conn|
-conn.multi do
-
-
-
-
-
-
-
+conn.multi do |transaction|
+transaction.incrby("stat:processed", procd)
+transaction.incrby("stat:processed:#{nowdate}", procd)
+transaction.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+transaction.incrby("stat:failed", fails)
+transaction.incrby("stat:failed:#{nowdate}", fails)
+transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
+end
 
-
+# work is the current set of executing jobs
+work_key = "#{key}:work"
+conn.pipelined do |transaction|
+transaction.unlink(work_key)
 curstate.each_pair do |tid, hash|
-
+transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
 end
-
+transaction.expire(work_key, 60)
 end
 end
 
@@ -161,17 +162,17 @@ module Sidekiq
 kb = memory_usage(::Process.pid)
 
 _, exists, _, _, msg = Sidekiq.redis { |conn|
-conn.multi {
-
-
-
+conn.multi { |transaction|
+transaction.sadd("processes", key)
+transaction.exists?(key)
+transaction.hmset(key, "info", to_json,
 "busy", curstate.size,
 "beat", Time.now.to_f,
 "rtt_us", rtt,
 "quiet", @done,
 "rss", kb)
-
-
+transaction.expire(key, 60)
+transaction.rpop("#{key}-signals")
 }
 }
 
@@ -214,7 +215,7 @@ module Sidekiq
 Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
 Ensure Redis is running in the same AZ or datacenter as Sidekiq.
 If these values are close to 100,000, that means your Sidekiq process may be
-CPU
+CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
 EOM
 RTT_READINGS.reset
 end
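Every `pipelined`/`multi` call in the heartbeat above now uses the block-argument form, issuing commands on the yielded pipeline or transaction object instead of on the outer connection, which is the style newer redis-rb releases expect. The pattern in isolation (keys here are illustrative only):

```ruby
Sidekiq.redis do |conn|
  conn.pipelined do |pipeline|
    pipeline.incrby("example:counter", 1)
    pipeline.expire("example:counter", 60)
  end

  conn.multi do |transaction|
    transaction.sadd("example:set", "member")
    transaction.expire("example:set", 60)
  end
end
```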
data/lib/sidekiq/logger.rb
CHANGED
@@ -16,6 +16,10 @@ module Sidekiq
 def self.current
 Thread.current[:sidekiq_context] ||= {}
 end
+
+def self.add(k, v)
+Thread.current[:sidekiq_context][k] = v
+end
 end
 
 module LoggingUtils
@@ -31,24 +35,10 @@ module Sidekiq
 nil
 end
 
-
-level
-
-
-def info?
-level <= 1
-end
-
-def warn?
-level <= 2
-end
-
-def error?
-level <= 3
-end
-
-def fatal?
-level <= 4
+LEVELS.each do |level, numeric_level|
+define_method("#{level}?") do
+local_level.nil? ? super() : local_level <= numeric_level
+end
 end
 
 def local_level
|