sidekiq 5.2.5 → 6.0.3
Potentially problematic release: 6.0 is a major version bump for sidekiq, so review the bundled data/6.0-Upgrade.md before upgrading.
- checksums.yaml +4 -4
- data/.circleci/config.yml +82 -0
- data/.gitignore +0 -2
- data/.standard.yml +20 -0
- data/6.0-Upgrade.md +72 -0
- data/COMM-LICENSE +11 -9
- data/Changes.md +136 -0
- data/Ent-2.0-Upgrade.md +37 -0
- data/Ent-Changes.md +32 -1
- data/Gemfile +12 -17
- data/Gemfile.lock +196 -0
- data/Pro-5.0-Upgrade.md +25 -0
- data/Pro-Changes.md +26 -2
- data/README.md +19 -31
- data/Rakefile +5 -4
- data/bin/sidekiqload +33 -25
- data/bin/sidekiqmon +8 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
- data/lib/generators/sidekiq/worker_generator.rb +20 -12
- data/lib/sidekiq/api.rb +230 -214
- data/lib/sidekiq/cli.rb +111 -174
- data/lib/sidekiq/client.rb +55 -46
- data/lib/sidekiq/delay.rb +5 -6
- data/lib/sidekiq/exception_handler.rb +10 -12
- data/lib/sidekiq/extensions/action_mailer.rb +10 -20
- data/lib/sidekiq/extensions/active_record.rb +9 -7
- data/lib/sidekiq/extensions/class_methods.rb +9 -7
- data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
- data/lib/sidekiq/fetch.rb +11 -12
- data/lib/sidekiq/job_logger.rb +45 -7
- data/lib/sidekiq/job_retry.rb +71 -60
- data/lib/sidekiq/launcher.rb +57 -51
- data/lib/sidekiq/logger.rb +165 -0
- data/lib/sidekiq/manager.rb +7 -9
- data/lib/sidekiq/middleware/chain.rb +14 -4
- data/lib/sidekiq/middleware/i18n.rb +5 -7
- data/lib/sidekiq/monitor.rb +133 -0
- data/lib/sidekiq/paginator.rb +18 -14
- data/lib/sidekiq/processor.rb +83 -75
- data/lib/sidekiq/rails.rb +23 -29
- data/lib/sidekiq/redis_connection.rb +31 -37
- data/lib/sidekiq/scheduled.rb +28 -29
- data/lib/sidekiq/testing/inline.rb +2 -1
- data/lib/sidekiq/testing.rb +34 -23
- data/lib/sidekiq/util.rb +17 -16
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +14 -10
- data/lib/sidekiq/web/application.rb +64 -66
- data/lib/sidekiq/web/helpers.rb +89 -71
- data/lib/sidekiq/web/router.rb +17 -14
- data/lib/sidekiq/web.rb +41 -49
- data/lib/sidekiq/worker.rb +129 -97
- data/lib/sidekiq.rb +61 -42
- data/sidekiq.gemspec +16 -16
- data/web/assets/javascripts/dashboard.js +4 -23
- data/web/assets/stylesheets/application-dark.css +125 -0
- data/web/assets/stylesheets/application.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +1 -1
- data/web/locales/de.yml +14 -2
- data/web/locales/ja.yml +2 -1
- data/web/views/_job_info.erb +2 -1
- data/web/views/busy.erb +4 -1
- data/web/views/dead.erb +2 -2
- data/web/views/layout.erb +1 -0
- data/web/views/morgue.erb +4 -1
- data/web/views/queue.erb +10 -1
- data/web/views/queues.erb +1 -1
- data/web/views/retries.erb +4 -1
- data/web/views/retry.erb +2 -2
- data/web/views/scheduled.erb +4 -1
- metadata +21 -32
- data/.travis.yml +0 -17
- data/Appraisals +0 -9
- data/bin/sidekiqctl +0 -237
- data/gemfiles/rails_4.gemfile +0 -31
- data/gemfiles/rails_5.gemfile +0 -31
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -1,6 +1,10 @@
 # frozen_string_literal: true
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
+
+require "sidekiq/scheduled"
+require "sidekiq/api"
+
+require "zlib"
+require "base64"
 
 module Sidekiq
   ##
@@ -56,7 +60,8 @@ module Sidekiq
   # end
   #
   class JobRetry
-    class Skip < ::RuntimeError; end
+    class Handled < ::RuntimeError; end
+    class Skip < Handled; end
 
     include Sidekiq::Util
 
@@ -69,9 +74,9 @@ module Sidekiq
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
     # require the worker to be instantiated.
-    def global(msg, queue)
+    def global(jobstr, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -80,22 +85,20 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry']
+      msg = Sidekiq.load_json(jobstr)
+      if msg["retry"]
         attempt_retry(nil, msg, queue, e)
       else
         Sidekiq.death_handlers.each do |handler|
-          begin
-            handler.call(msg, e)
-          rescue => handler_ex
-            handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
-          end
+          handler.call(msg, e)
+        rescue => handler_ex
+          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
         end
       end
 
-      raise Skip
+      raise Handled
     end
 
-
     # The local retry support means that any errors that occur within
     # this block can be associated with the given worker instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
@@ -104,9 +107,9 @@ module Sidekiq
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(worker, msg, queue)
+    def local(worker, jobstr, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -115,11 +118,12 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      if msg['retry'] == nil
-        msg['retry'] = worker.class.get_sidekiq_options['retry']
+      msg = Sidekiq.load_json(jobstr)
+      if msg["retry"].nil?
+        msg["retry"] = worker.class.get_sidekiq_options["retry"]
       end
 
-      raise e unless msg['retry']
+      raise e unless msg["retry"]
       attempt_retry(worker, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
@@ -132,48 +136,44 @@ module Sidekiq
     # instantiate the worker instance. All access must be guarded and
     # best effort.
    def attempt_retry(worker, msg, queue, exception)
-      max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
-      msg['queue'] = if msg['retry_queue']
-        msg['retry_queue']
-      else
-        queue
-      end
+      msg["queue"] = (msg["retry_queue"] || queue)
 
-      # App code can stuff all sorts of crazy binary data into the error message
-      # that won't convert to JSON.
-      m = exception.message.to_s[0, 10_000]
+      m = exception_message(exception)
       if m.respond_to?(:scrub!)
         m.force_encoding("utf-8")
         m.scrub!
       end
 
-      msg['error_message'] = m
-      msg['error_class'] = exception.class.name
-      count = if msg['retry_count']
-        msg['retried_at'] = Time.now.to_f
-        msg['retry_count'] += 1
+      msg["error_message"] = m
+      msg["error_class"] = exception.class.name
+      count = if msg["retry_count"]
+        msg["retried_at"] = Time.now.to_f
+        msg["retry_count"] += 1
       else
-        msg['failed_at'] = Time.now.to_f
-        msg['retry_count'] = 0
+        msg["failed_at"] = Time.now.to_f
+        msg["retry_count"] = 0
       end
 
-      if msg['backtrace'] == true
-        msg['error_backtrace'] = exception.backtrace
-      elsif !msg['backtrace']
-        # do nothing
-      elsif msg['backtrace'].to_i != 0
-        msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+      if msg["backtrace"]
+        lines = if msg["backtrace"] == true
+          exception.backtrace
+        else
+          exception.backtrace[0...msg["backtrace"].to_i]
+        end
+
+        msg["error_backtrace"] = compress_backtrace(lines)
       end
 
       if count < max_retry_attempts
         delay = delay_for(worker, count, exception)
         # Logging here can break retries if the logging device raises ENOSPC #3979
-        #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
         payload = Sidekiq.dump_json(msg)
         Sidekiq.redis do |conn|
-          conn.zadd('retry', retry_at.to_s, payload)
+          conn.zadd("retry", retry_at.to_s, payload)
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
@@ -183,25 +183,23 @@ module Sidekiq
 
     def retries_exhausted(worker, msg, exception)
       begin
-        block = worker && worker.sidekiq_retries_exhausted_block
-        block.call(msg, exception) if block
+        block = worker&.sidekiq_retries_exhausted_block
+        block&.call(msg, exception)
       rescue => e
-        handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
       end
 
       Sidekiq.death_handlers.each do |handler|
-        begin
-          handler.call(msg, exception)
-        rescue => e
-          handle_exception(e, { context: "Error calling death handler", job: msg })
-        end
+        handler.call(msg, exception)
+      rescue => e
+        handle_exception(e, {context: "Error calling death handler", job: msg})
       end
 
-      send_to_morgue(msg) unless msg['dead'] == false
+      send_to_morgue(msg) unless msg["dead"] == false
     end
 
     def send_to_morgue(msg)
-      logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
       DeadSet.new.kill(payload, notify_failure: false)
     end
@@ -215,7 +213,7 @@ module Sidekiq
     end
 
     def delay_for(worker, count, exception)
-      if worker && worker.sidekiq_retry_in_block
+      if worker&.sidekiq_retry_in_block
        custom_retry_in = retry_in(worker, count, exception).to_i
        return custom_retry_in if custom_retry_in > 0
      end
@@ -224,16 +222,14 @@ module Sidekiq
 
     # delayed_job uses the same basic formula
     def seconds_to_delay(count)
-      (count ** 4) + 15 + (rand(30) * (count + 1))
+      (count**4) + 15 + (rand(30) * (count + 1))
     end
 
     def retry_in(worker, count, exception)
-      begin
-        worker.sidekiq_retry_in_block.call(count, exception)
-      rescue Exception => e
-        handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-        nil
-      end
+      worker.sidekiq_retry_in_block.call(count, exception)
+    rescue Exception => e
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      nil
     end
 
     def exception_caused_by_shutdown?(e, checked_causes = [])
@@ -247,5 +243,20 @@ module Sidekiq
       exception_caused_by_shutdown?(e.cause, checked_causes)
     end
 
+    # Extract message from exception.
+    # Set a default if the message raises an error
+    def exception_message(exception)
+      # App code can stuff all sorts of crazy binary data into the error message
+      # that won't convert to JSON.
+      exception.message.to_s[0, 10_000]
+    rescue
+      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
+    end
+
+    def compress_backtrace(backtrace)
+      serialized = Sidekiq.dump_json(backtrace)
+      compressed = Zlib::Deflate.deflate(serialized)
+      Base64.encode64(compressed)
+    end
   end
 end
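Two payload-level changes in this file are worth calling out. First, `global` and `local` now receive the raw job JSON string (`jobstr`) and parse it only once an error actually needs handling. Second, `error_backtrace` is no longer stored as a plain array of strings: `compress_backtrace` serializes the frames to JSON, Deflate-compresses them, and Base64-encodes the result. Below is a minimal sketch of the inverse transform for inspecting 6.x retry payloads by hand, plus a death-handler registration matching the `Sidekiq.death_handlers` hook invoked above; `decode_backtrace` is a hypothetical helper, not part of the gem:

require "zlib"
require "base64"
require "sidekiq"

# Invert JobRetry#compress_backtrace: Base64(Deflate(JSON)) back to an
# array of backtrace frame strings.
def decode_backtrace(encoded)
  Sidekiq.load_json(Zlib::Inflate.inflate(Base64.decode64(encoded)))
end

Sidekiq.configure_server do |config|
  # Death handlers run once retries are exhausted, before the job is
  # sent to the morgue (unless the job sets "dead" => false).
  config.death_handlers << ->(job, ex) do
    Sidekiq.logger.warn("#{job["class"]} #{job["jid"]} died: #{ex.message}")
  end
end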
data/lib/sidekiq/launcher.rb
CHANGED
@@ -1,19 +1,25 @@
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/fetch"
+require "sidekiq/scheduled"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
   class Launcher
     include Util
 
-    attr_accessor :manager, :poller, :fetcher
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
-    STATS_TTL = 5*365*24*60*60
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "stopping" if me.stopping? },
+    ]
+
+    attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
       @manager = Sidekiq::Manager.new(options)
@@ -62,10 +68,30 @@ module Sidekiq
 
     private unless $TESTING
 
+    def start_heartbeat
+      loop do
+        heartbeat
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
+    end
+
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      Sidekiq.redis do |conn|
+        conn.pipelined do
+          conn.srem("processes", identity)
+          conn.del("#{identity}:workers")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
     def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
 
       ❤
     end
@@ -73,6 +99,7 @@ module Sidekiq
     def ❤
       key = identity
       fails = procd = 0
+
       begin
         fails = Processor::FAILURE.reset
         procd = Processor::PROCESSED.reset
@@ -80,6 +107,7 @@ module Sidekiq
 
       workers_key = "#{key}:workers"
       nowdate = Time.now.utc.strftime("%Y-%m-%d")
+
       Sidekiq.redis do |conn|
         conn.multi do
           conn.incrby("stat:processed", procd)
@@ -97,24 +125,25 @@ module Sidekiq
           conn.expire(workers_key, 60)
         end
       end
+
       fails = procd = 0
 
-      _, exists, _, _, msg = Sidekiq.redis do |conn|
-        conn.multi do
-          conn.sadd('processes', key)
+      _, exists, _, _, msg = Sidekiq.redis { |conn|
+        conn.multi {
+          conn.sadd("processes", key)
           conn.exists(key)
-          conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
+          conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
           conn.expire(key, 60)
           conn.rpop("#{key}-signals")
-        end
-      end
+        }
+      }
 
       # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-      fire_event(:heartbeat) if !exists
+      fire_event(:heartbeat) unless exists
 
       return unless msg
 
-      ::Process.kill(msg, $$)
+      ::Process.kill(msg, ::Process.pid)
     rescue => e
       # ignore all redis/network issues
       logger.error("heartbeat: #{e.message}")
@@ -124,25 +153,17 @@ module Sidekiq
       end
     end
 
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
-      end
-      Sidekiq.logger.info("Heartbeat stopping...")
-    end
-
     def to_data
       @data ||= begin
         {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
+          "hostname" => hostname,
+          "started_at" => Time.now.to_f,
+          "pid" => ::Process.pid,
+          "tag" => @options[:tag] || "",
+          "concurrency" => @options[:concurrency],
+          "queues" => @options[:queues].uniq,
+          "labels" => @options[:labels],
+          "identity" => identity,
         }
       end
     end
@@ -154,20 +175,5 @@ module Sidekiq
         Sidekiq.dump_json(to_data)
       end
     end
-
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
-    end
-
   end
 end
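The proctitle logic that lived in `Sidekiq::CLI::PROCTITLES` in 5.x moves onto the Launcher itself: each entry is a proc that receives the launcher and its data hash and returns a fragment (or nil, which is dropped), and the heartbeat rebuilds `$0` from the fragments every five seconds, yielding a `ps` line like `sidekiq 6.0.3 myapp [3 of 10 busy]` with a trailing `stopping` during shutdown. Since `PROCTITLES` is a plain Array constant, an initializer can in principle append its own fragment; a speculative sketch, not an API documented in this diff:

# Assumes server-side initialization; each proc is called as (launcher, data),
# where data is the hash built by Launcher#to_data above.
Sidekiq::Launcher::PROCTITLES << proc { |me, data| "queues=#{data["queues"].size}" }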
data/lib/sidekiq/logger.rb
ADDED
@@ -0,0 +1,165 @@
+# frozen_string_literal: true
+
+require "logger"
+require "time"
+
+module Sidekiq
+  module Context
+    def self.with(hash)
+      current.merge!(hash)
+      yield
+    ensure
+      hash.each_key { |key| current.delete(key) }
+    end
+
+    def self.current
+      Thread.current[:sidekiq_context] ||= {}
+    end
+  end
+
+  module LoggingUtils
+    LEVELS = {
+      "debug" => 0,
+      "info" => 1,
+      "warn" => 2,
+      "error" => 3,
+      "fatal" => 4,
+    }
+    LEVELS.default_proc = proc do |_, level|
+      Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
+      nil
+    end
+
+    def debug?
+      level <= 0
+    end
+
+    def info?
+      level <= 1
+    end
+
+    def warn?
+      level <= 2
+    end
+
+    def error?
+      level <= 3
+    end
+
+    def fatal?
+      level <= 4
+    end
+
+    def local_level
+      Thread.current[:sidekiq_log_level]
+    end
+
+    def local_level=(level)
+      case level
+      when Integer
+        Thread.current[:sidekiq_log_level] = level
+      when Symbol, String
+        Thread.current[:sidekiq_log_level] = LEVELS[level.to_s]
+      when nil
+        Thread.current[:sidekiq_log_level] = nil
+      else
+        raise ArgumentError, "Invalid log level: #{level.inspect}"
+      end
+    end
+
+    def level
+      local_level || super
+    end
+
+    # Change the thread-local level for the duration of the given block.
+    def log_at(level)
+      old_local_level = local_level
+      self.local_level = level
+      yield
+    ensure
+      self.local_level = old_local_level
+    end
+
+    # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
+    # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
+    def add(severity, message = nil, progname = nil, &block)
+      severity ||= UNKNOWN
+      progname ||= @progname
+
+      return true if @logdev.nil? || severity < level
+
+      if message.nil?
+        if block_given?
+          message = yield
+        else
+          message = progname
+          progname = @progname
+        end
+      end
+
+      @logdev.write format_message(format_severity(severity), Time.now, progname, message)
+    end
+  end
+
+  class Logger < ::Logger
+    include LoggingUtils
+
+    def initialize(*args)
+      super
+      self.formatter = Sidekiq.log_formatter
+    end
+
+    module Formatters
+      class Base < ::Logger::Formatter
+        def tid
+          Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+        end
+
+        def ctx
+          Sidekiq::Context.current
+        end
+
+        def format_context
+          if ctx.any?
+            " " + ctx.compact.map { |k, v|
+              case v
+              when Array
+                "#{k}=#{v.join(",")}"
+              else
+                "#{k}=#{v}"
+              end
+            }.join(" ")
+          end
+        end
+      end
+
+      class Pretty < Base
+        def call(severity, time, program_name, message)
+          "#{time.utc.iso8601(3)} pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class WithoutTimestamp < Pretty
+        def call(severity, time, program_name, message)
+          "pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class JSON < Base
+        def call(severity, time, program_name, message)
+          hash = {
+            ts: time.utc.iso8601(3),
+            pid: ::Process.pid,
+            tid: tid,
+            lvl: severity,
+            msg: message,
+          }
+          c = ctx
+          hash["ctx"] = c unless c.empty?
+
+          Sidekiq.dump_json(hash) << "\n"
+        end
+      end
+    end
+  end
+end
data/lib/sidekiq/manager.rb
CHANGED
@@ -1,12 +1,11 @@
 # frozen_string_literal: true
-require 'sidekiq/util'
-require 'sidekiq/processor'
-require 'sidekiq/fetch'
-require 'thread'
-require 'set'
 
-
+require "sidekiq/util"
+require "sidekiq/processor"
+require "sidekiq/fetch"
+require "set"
+
 module Sidekiq
   ##
   # The Manager is the central coordination point in Sidekiq, controlling
   # the lifecycle of the Processors.
@@ -27,7 +26,7 @@ module Sidekiq
     attr_reader :workers
     attr_reader :options
 
-    def initialize(options={})
+    def initialize(options = {})
       logger.debug { options.inspect }
       @options = options
       @count = options[:concurrency] || 10
@@ -113,7 +112,7 @@ module Sidekiq
       end
 
       if cleanup.size > 0
-        jobs = cleanup.map {|p| p.job }.compact
+        jobs = cleanup.map { |p| p.job }.compact
 
         logger.warn { "Terminating #{cleanup.size} busy worker threads" }
         logger.warn { "Work still in progress #{jobs.inspect}" }
@@ -132,6 +131,5 @@ module Sidekiq
         processor.kill
       end
     end
-
   end
 end
data/lib/sidekiq/middleware/chain.rb
CHANGED
@@ -1,4 +1,5 @@
 # frozen_string_literal: true
+
 module Sidekiq
   # Middleware is code configured to run before/after
   # a message is processed. It is patterned after Rack
@@ -66,7 +67,6 @@ module Sidekiq
   module Middleware
     class Chain
       include Enumerable
-      attr_reader :entries
 
       def initialize_copy(copy)
         copy.instance_variable_set(:@entries, entries.dup)
@@ -77,10 +77,14 @@ module Sidekiq
       end
 
       def initialize
-        @entries = []
+        @entries = nil
        yield self if block_given?
       end
 
+      def entries
+        @entries ||= []
+      end
+
       def remove(klass)
         entries.delete_if { |entry| entry.klass == klass }
       end
@@ -106,13 +110,17 @@ module Sidekiq
         i = entries.index { |entry| entry.klass == newklass }
         new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
         i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1
-        entries.insert(i+1, new_entry)
+        entries.insert(i + 1, new_entry)
       end
 
       def exists?(klass)
         any? { |entry| entry.klass == klass }
       end
 
+      def empty?
+        @entries.nil? || @entries.empty?
+      end
+
       def retrieve
         map(&:make_new)
       end
@@ -122,6 +130,8 @@ module Sidekiq
       end
 
       def invoke(*args)
+        return yield if empty?
+
         chain = retrieve.dup
         traverse_chain = lambda do
           if chain.empty?
@@ -139,7 +149,7 @@ module Sidekiq
 
       def initialize(klass, *args)
         @klass = klass
-        @args  = args
+        @args = args
       end
 
       def make_new
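Beyond the standardrb reformatting, two behavioral tweaks hide in this file: `@entries` is now allocated lazily, and `invoke` short-circuits with `return yield if empty?`, so processes with no custom middleware skip building the traversal lambda on every job. Registration itself is unchanged; a minimal sketch with a hypothetical `AcmeMetrics` middleware:

class AcmeMetrics
  # Server middleware contract: #call must yield to continue the chain.
  def call(worker, job, queue)
    started = Time.now
    yield
  ensure
    Sidekiq.logger.info("#{job["class"]} on #{queue} took #{Time.now - started}s")
  end
end

Sidekiq.configure_server do |config|
  config.server_middleware do |chain|
    chain.add AcmeMetrics
  end
end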
|