sidekiq 5.1.1 → 7.1.2
- checksums.yaml +5 -5
- data/Changes.md +627 -8
- data/LICENSE.txt +9 -0
- data/README.md +47 -50
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +213 -115
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +566 -329
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +241 -256
- data/lib/sidekiq/client.rb +125 -102
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +278 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/job.rb +374 -0
- data/lib/sidekiq/job_logger.rb +36 -9
- data/lib/sidekiq/job_retry.rb +147 -98
- data/lib/sidekiq/job_util.rb +105 -0
- data/lib/sidekiq/launcher.rb +207 -103
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +43 -47
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +136 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +95 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +146 -0
- data/lib/sidekiq/paginator.rb +28 -16
- data/lib/sidekiq/processor.rb +159 -107
- data/lib/sidekiq/rails.rb +54 -43
- data/lib/sidekiq/redis_client_adapter.rb +96 -0
- data/lib/sidekiq/redis_connection.rb +39 -81
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +139 -48
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +70 -88
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +3 -1
- data/lib/sidekiq/web/action.rb +15 -11
- data/lib/sidekiq/web/application.rb +143 -77
- data/lib/sidekiq/web/csrf_protection.rb +180 -0
- data/lib/sidekiq/web/helpers.rb +144 -106
- data/lib/sidekiq/web/router.rb +23 -19
- data/lib/sidekiq/web.rb +60 -111
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +94 -183
- data/sidekiq.gemspec +25 -23
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +130 -61
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +36 -282
- data/web/assets/javascripts/metrics.js +264 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +2 -95
- data/web/assets/stylesheets/application.css +134 -521
- data/web/assets/stylesheets/bootstrap.css +2 -2
- data/web/locales/ar.yml +71 -64
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -53
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +84 -65
- data/web/locales/es.yml +70 -54
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +63 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +68 -63
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +8 -2
- data/web/views/_job_info.erb +21 -4
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +4 -18
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +75 -25
- data/web/views/dashboard.erb +58 -18
- data/web/views/dead.erb +3 -3
- data/web/views/layout.erb +4 -2
- data/web/views/metrics.erb +82 -0
- data/web/views/metrics_for_job.erb +68 -0
- data/web/views/morgue.erb +14 -15
- data/web/views/queue.erb +33 -23
- data/web/views/queues.erb +14 -4
- data/web/views/retries.erb +19 -16
- data/web/views/retry.erb +3 -3
- data/web/views/scheduled.erb +17 -15
- metadata +71 -140
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -13
- data/.travis.yml +0 -14
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/COMM-LICENSE +0 -95
- data/Ent-Changes.md +0 -210
- data/Gemfile +0 -8
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -716
- data/Rakefile +0 -8
- data/bin/sidekiqctl +0 -99
- data/code_of_conduct.md +0 -50
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/delay.rb +0 -41
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -22
- data/lib/sidekiq/middleware/server/active_record_cache.rb +0 -11
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -204
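
One theme dominates this listing: the "worker" naming gives way to "job" — lib/sidekiq/worker.rb is replaced by lib/sidekiq/job.rb, the generators now emit job classes, and lib/sidekiq/worker_compatibility_alias.rb keeps the old constant working. A minimal sketch of a job class under the 7.x naming (the class name and arguments are illustrative):

    require "sidekiq"

    class HardJob
      # Sidekiq::Job is the primary module in 7.x; including
      # Sidekiq::Worker still works via the compatibility alias.
      include Sidekiq::Job
      sidekiq_options queue: "default", retry: 5

      def perform(user_id)
        # do the work for one user here
      end
    end

    HardJob.perform_async(1234)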
data/lib/sidekiq/launcher.rb
CHANGED
@@ -1,56 +1,71 @@
-# encoding: utf-8
 # frozen_string_literal: true
-require 'sidekiq/manager'
-require 'sidekiq/fetch'
-require 'sidekiq/scheduled'
+
+require "sidekiq/manager"
+require "sidekiq/capsule"
+require "sidekiq/scheduled"
+require "sidekiq/ring_buffer"
 
 module Sidekiq
-  # The Launcher is a very simple Actor whose job is to
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Capsule Managers, the Poller thread and provides the process heartbeat.
   class Launcher
-    include Util
+    include Sidekiq::Component
+
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
+
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORK_STATE.size} of #{me.config.total_concurrency} busy]" },
+      proc { |me, data| "stopping" if me.stopping? }
+    ]
 
-    attr_accessor :manager, :poller, :fetcher
+    attr_accessor :managers, :poller
 
-    def initialize(options)
-      @manager = Sidekiq::Manager.new(options)
-      @poller = Sidekiq::Scheduled::Poller.new
+    def initialize(config, embedded: false)
+      @config = config
+      @embedded = embedded
+      @managers = config.capsules.values.map do |cap|
+        Sidekiq::Manager.new(cap)
+      end
+      @poller = Sidekiq::Scheduled::Poller.new(@config)
       @done = false
-      @options = options
     end
 
-    def run
-      @thread = safe_thread("heartbeat", &method(:start_heartbeat))
+    # Start this Sidekiq instance. If an embedding process already
+    # has a heartbeat thread, caller can use `async_beat: false`
+    # and instead have thread call Launcher#heartbeat every N seconds.
+    def run(async_beat: true)
+      Sidekiq.freeze!
+      logger.debug { @config.merge!({}) }
+      @thread = safe_thread("heartbeat", &method(:start_heartbeat)) if async_beat
       @poller.start
-      @manager.start
+      @managers.each(&:start)
     end
 
     # Stops this instance from processing any more jobs,
-    #
     def quiet
+      return if @done
+
       @done = true
-      @manager.quiet
+      @managers.each(&:quiet)
       @poller.terminate
+      fire_event(:quiet, reverse: true)
     end
 
-    # Shuts down the process.  This method does not
-    # return until all work is complete and cleaned up.
-    # It can take up to the timeout to complete.
+    # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
     def stop
-      deadline = Time.now + @options[:timeout]
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]
 
-      @done = true
-      @manager.quiet
-      @poller.terminate
-
-      @manager.stop(deadline)
+      quiet
+      stoppers = @managers.map do |mgr|
+        Thread.new do
+          mgr.stop(deadline)
+        end
+      end
 
-      # Requeue everything in case there was a worker who grabbed work while stopped.
-      # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-      strategy = (@options[:fetch] || Sidekiq::BasicFetch)
-      strategy.bulk_requeue([], @options)
+      fire_event(:shutdown, reverse: true)
+      stoppers.each(&:join)
 
       clear_heartbeat
     end
@@ -59,109 +74,198 @@ module Sidekiq
       @done
     end
 
+    # If embedding Sidekiq, you can have the process heartbeat
+    # call this method to regularly heartbeat rather than creating
+    # a separate thread.
+    def heartbeat
+      ❤
+    end
+
     private unless $TESTING
 
-    def heartbeat
-      results = Sidekiq::CLI::PROCTITLES.map { |x| x.(self, to_data) }
-      results.compact!
-      $0 = results.join(' ')
+    BEAT_PAUSE = 10
+
+    def start_heartbeat
+      loop do
+        beat
+        sleep BEAT_PAUSE
+      end
+      logger.info("Heartbeat stopping...")
+    end
 
+    def beat
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ") unless @embedded
       ❤
     end
 
+    def clear_heartbeat
+      flush_stats
+
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      redis do |conn|
+        conn.pipelined do |pipeline|
+          pipeline.srem("processes", [identity])
+          pipeline.unlink("#{identity}:work")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
+    def flush_stats
+      fails = Processor::FAILURE.reset
+      procd = Processor::PROCESSED.reset
+      return if fails + procd == 0
+
+      nowdate = Time.now.utc.strftime("%Y-%m-%d")
+      begin
+        redis do |conn|
+          conn.pipelined do |pipeline|
+            pipeline.incrby("stat:processed", procd)
+            pipeline.incrby("stat:processed:#{nowdate}", procd)
+            pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+            pipeline.incrby("stat:failed", fails)
+            pipeline.incrby("stat:failed:#{nowdate}", fails)
+            pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
+          end
+        end
+      rescue => ex
+        logger.warn("Unable to flush stats: #{ex}")
+      end
+    end
+
     def ❤
       key = identity
       fails = procd = 0
+
       begin
-        fails = Processor::FAILURE.reset
-        procd = Processor::PROCESSED.reset
-        curstate = Processor::WORKER_STATE.dup
-        workers_key = "#{key}:workers"
-        nowdate = Time.now.utc.strftime("%Y-%m-%d")
-        Sidekiq.redis do |conn|
-          conn.multi do
-            conn.incrby("stat:processed", procd)
-            conn.incrby("stat:processed:#{nowdate}", procd)
-            conn.incrby("stat:failed", fails)
-            conn.incrby("stat:failed:#{nowdate}", fails)
-            conn.del(workers_key)
-            Processor::WORKER_STATE.each_pair do |tid, hash|
-              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
+        flush_stats
+
+        curstate = Processor::WORK_STATE.dup
+        redis do |conn|
+          # work is the current set of executing jobs
+          work_key = "#{key}:work"
+          conn.pipelined do |transaction|
+            transaction.unlink(work_key)
+            curstate.each_pair do |tid, hash|
+              transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
             end
-            conn.expire(workers_key, 60)
+            transaction.expire(work_key, 60)
          end
        end
+
+        rtt = check_rtt
+
        fails = procd = 0
+        kb = memory_usage(::Process.pid)
 
-        _, exists, _, _, msg = Sidekiq.redis do |conn|
-          conn.multi do
-            conn.sadd('processes', key)
-            conn.exists(key)
-            conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
-            conn.expire(key, 60)
-            conn.rpop("#{key}-signals")
-          end
-        end
+        _, exists, _, _, signal = redis { |conn|
+          conn.multi { |transaction|
+            transaction.sadd("processes", [key])
+            transaction.exists(key)
+            transaction.hset(key, "info", to_json,
+              "busy", curstate.size,
+              "beat", Time.now.to_f,
+              "rtt_us", rtt,
+              "quiet", @done.to_s,
+              "rss", kb)
+            transaction.expire(key, 60)
+            transaction.rpop("#{key}-signals")
+          }
+        }
 
         # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-        fire_event(:heartbeat) if !exists
+        fire_event(:heartbeat) unless exists > 0
+        fire_event(:beat, oneshot: false)
 
-        return unless msg
-
-        ::Process.kill(msg, $$)
+        ::Process.kill(signal, ::Process.pid) if signal && !@embedded
       rescue => e
         # ignore all redis/network issues
-        logger.error("heartbeat: #{e.message}")
+        logger.error("heartbeat: #{e}")
         # don't lose the counts if there was a network issue
-        Processor::PROCESSED.increment(procd)
-        Processor::FAILURE.increment(fails)
+        Processor::PROCESSED.incr(procd)
+        Processor::FAILURE.incr(fails)
       end
     end
 
-    def start_heartbeat
-      while true
-        heartbeat
-        sleep 5
+    # We run the heartbeat every five seconds.
+    # Capture five samples of RTT, log a warning if each sample
+    # is above our warning threshold.
+    RTT_READINGS = RingBuffer.new(5)
+    RTT_WARNING_LEVEL = 50_000
+
+    def check_rtt
+      a = b = 0
+      redis do |x|
+        a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
+        x.ping
+        b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
+      end
+      rtt = b - a
+      RTT_READINGS << rtt
+      # Ideal RTT for Redis is < 1000µs
+      # Workable is < 10,000µs
+      # Log a warning if it's a disaster.
+      if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
+        logger.warn <<~EOM
+          Your Redis network connection is performing extremely poorly.
+          Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
+          Ensure Redis is running in the same AZ or datacenter as Sidekiq.
+          If these values are close to 100,000, that means your Sidekiq process may be
+          CPU-saturated; reduce your concurrency and/or see https://github.com/sidekiq/sidekiq/discussions/5039
+        EOM
+        RTT_READINGS.reset
       end
-      Sidekiq.logger.info("Heartbeat stopping...")
+      rtt
     end
 
-    def to_data
-      @data ||= begin
-        {
-          'hostname' => hostname,
-          'started_at' => Time.now.to_f,
-          'pid' => $$,
-          'tag' => @options[:tag] || '',
-          'concurrency' => @options[:concurrency],
-          'queues' => @options[:queues].uniq,
-          'labels' => @options[:labels],
-          'identity' => identity,
-        }
-      end
+    MEMORY_GRABBER = case RUBY_PLATFORM
+    when /linux/
+      ->(pid) {
+        IO.readlines("/proc/#{$$}/status").each do |line|
+          next unless line.start_with?("VmRSS:")
+          break line.split[1].to_i
+        end
+      }
+    when /darwin|bsd/
+      ->(pid) {
+        `ps -o pid,rss -p #{pid}`.lines.last.split.last.to_i
+      }
+    else
+      ->(pid) { 0 }
     end
 
-    def to_json
-      @json ||= begin
-        # this data changes infrequently so dump it to a string
-        # now so we don't need to dump it every heartbeat.
-        Sidekiq.dump_json(to_data)
-      end
+    def memory_usage(pid)
+      MEMORY_GRABBER.call(pid)
     end
 
-    def clear_heartbeat
-      # Remove record from Redis since we are shutting down.
-      # Note we don't stop the heartbeat thread; if the process
-      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem('processes', identity)
-          conn.del("#{identity}:workers")
-        end
-      end
-    rescue
-      # best effort, ignore network errors
+    def to_data
+      @data ||= {
+        "hostname" => hostname,
+        "started_at" => Time.now.to_f,
+        "pid" => ::Process.pid,
+        "tag" => @config[:tag] || "",
+        "concurrency" => @config.total_concurrency,
+        "queues" => @config.capsules.values.flat_map { |cap| cap.queues }.uniq,
+        "weights" => to_weights,
+        "labels" => @config[:labels].to_a,
+        "identity" => identity,
+        "version" => Sidekiq::VERSION,
+        "embedded" => @embedded
+      }
     end
 
+    def to_weights
+      @config.capsules.values.map(&:weights)
+    end
+
+    def to_json
+      # this data changes infrequently so dump it to a string
+      # now so we don't need to dump it every heartbeat.
+      @json ||= Sidekiq.dump_json(to_data)
+    end
   end
 end
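
The reworked Launcher above is what makes embedding possible: initialize takes the config plus an embedded: flag, and run(async_beat: false) lets the host process drive the heartbeat itself. A sketch of how an embedding caller might use this, based on the new API above and the lib/sidekiq/embedded.rb added in the file listing (queue names and concurrency are illustrative):

    require "sidekiq/embedded"

    # configure_embed returns a handle whose run/stop map onto
    # Launcher#run and Launcher#stop shown above.
    embedded = Sidekiq.configure_embed do |config|
      config.queues = %w[critical default low]
      config.concurrency = 2  # keep this small inside a web process
    end

    embedded.run
    # ... host process serves its own traffic ...
    embedded.stop  # quiets, then waits up to the shutdown timeout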
data/lib/sidekiq/logger.rb
ADDED
@@ -0,0 +1,131 @@
+# frozen_string_literal: true
+
+require "logger"
+require "time"
+
+module Sidekiq
+  module Context
+    def self.with(hash)
+      orig_context = current.dup
+      current.merge!(hash)
+      yield
+    ensure
+      Thread.current[:sidekiq_context] = orig_context
+    end
+
+    def self.current
+      Thread.current[:sidekiq_context] ||= {}
+    end
+
+    def self.add(k, v)
+      current[k] = v
+    end
+  end
+
+  module LoggingUtils
+    LEVELS = {
+      "debug" => 0,
+      "info" => 1,
+      "warn" => 2,
+      "error" => 3,
+      "fatal" => 4
+    }
+    LEVELS.default_proc = proc do |_, level|
+      puts("Invalid log level: #{level.inspect}")
+      nil
+    end
+
+    LEVELS.each do |level, numeric_level|
+      define_method("#{level}?") do
+        local_level.nil? ? super() : local_level <= numeric_level
+      end
+    end
+
+    def local_level
+      Thread.current[:sidekiq_log_level]
+    end
+
+    def local_level=(level)
+      case level
+      when Integer
+        Thread.current[:sidekiq_log_level] = level
+      when Symbol, String
+        Thread.current[:sidekiq_log_level] = LEVELS[level.to_s]
+      when nil
+        Thread.current[:sidekiq_log_level] = nil
+      else
+        raise ArgumentError, "Invalid log level: #{level.inspect}"
+      end
+    end
+
+    def level
+      local_level || super
+    end
+
+    # Change the thread-local level for the duration of the given block.
+    def log_at(level)
+      old_local_level = local_level
+      self.local_level = level
+      yield
+    ensure
+      self.local_level = old_local_level
+    end
+  end
+
+  class Logger < ::Logger
+    include LoggingUtils
+
+    module Formatters
+      class Base < ::Logger::Formatter
+        def tid
+          Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+        end
+
+        def ctx
+          Sidekiq::Context.current
+        end
+
+        def format_context
+          if ctx.any?
+            " " + ctx.compact.map { |k, v|
+              case v
+              when Array
+                "#{k}=#{v.join(",")}"
+              else
+                "#{k}=#{v}"
+              end
+            }.join(" ")
+          end
+        end
+      end
+
+      class Pretty < Base
+        def call(severity, time, program_name, message)
+          "#{time.utc.iso8601(3)} pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class WithoutTimestamp < Pretty
+        def call(severity, time, program_name, message)
+          "pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class JSON < Base
+        def call(severity, time, program_name, message)
+          hash = {
+            ts: time.utc.iso8601(3),
+            pid: ::Process.pid,
+            tid: tid,
+            lvl: severity,
+            msg: message
+          }
+          c = ctx
+          hash["ctx"] = c unless c.empty?
+
+          Sidekiq.dump_json(hash) << "\n"
+        end
+      end
+    end
+  end
+end
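
Beyond the formatters, two pieces of this new file do the heavy lifting: Sidekiq::Context stamps thread-local key=value pairs onto every line logged inside its block, and LoggingUtils#log_at changes the level for one thread for the duration of a block. A short sketch of both (the batch_id key is illustrative):

    Sidekiq::Context.with(batch_id: 42) do
      Sidekiq.logger.info("resizing images")
      # Pretty formatter output resembles:
      #   2023-07-01T12:00:00.000Z pid=123 tid=1ab batch_id=42 INFO: resizing images
    end

    Sidekiq.logger.log_at(:debug) do
      Sidekiq.logger.debug("visible even when the global level is INFO")
    end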
data/lib/sidekiq/manager.rb
CHANGED
@@ -1,13 +1,9 @@
-# encoding: utf-8
 # frozen_string_literal: true
-require 'sidekiq/util'
-require 'sidekiq/processor'
-require 'sidekiq/fetch'
-require 'thread'
-require 'set'
 
-module Sidekiq
+require "sidekiq/processor"
+require "set"
 
+module Sidekiq
   ##
   # The Manager is the central coordination point in Sidekiq, controlling
   # the lifecycle of the Processors.
@@ -23,46 +19,38 @@ module Sidekiq
   # the shutdown process. The other tasks are performed by other threads.
   #
   class Manager
-    include Util
+    include Sidekiq::Component
 
     attr_reader :workers
-    attr_reader :options
+    attr_reader :capsule
 
-    def initialize(options = {})
-      logger.debug { options.inspect }
-      @options = options
-      @count = options[:concurrency] || 25
+    def initialize(capsule)
+      @config = @capsule = capsule
+      @count = capsule.concurrency
       raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1
 
       @done = false
       @workers = Set.new
+      @plock = Mutex.new
       @count.times do
-        @workers << Processor.new(self)
+        @workers << Processor.new(@config, &method(:processor_result))
       end
-      @plock = Mutex.new
     end
 
     def start
-      @workers.each do |x|
-        x.start
-      end
+      @workers.each(&:start)
    end
 
    def quiet
      return if @done
      @done = true
 
-      logger.info { "Terminating quiet workers" }
-      @workers.each { |x| x.terminate }
-      fire_event(:quiet, reverse: true)
+      logger.info { "Terminating quiet threads for #{capsule.name} capsule" }
+      @workers.each(&:terminate)
    end
 
-    # hack for quicker development / testing environment #2774
-    PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5
-
    def stop(deadline)
      quiet
-      fire_event(:shutdown, reverse: true)
 
      # some of the shutdown events can be async,
      # we don't have any way to know when they're done but
@@ -70,29 +58,20 @@ module Sidekiq
       sleep PAUSE_TIME
       return if @workers.empty?
 
-      logger.info { "Pausing to allow workers to finish..." }
-      remaining = deadline - Time.now
-      while remaining > PAUSE_TIME
-        return if @workers.empty?
-        sleep PAUSE_TIME
-        remaining = deadline - Time.now
-      end
+      logger.info { "Pausing to allow jobs to finish..." }
+      wait_for(deadline) { @workers.empty? }
       return if @workers.empty?
 
       hard_shutdown
+    ensure
+      capsule.stop
     end
 
-    def processor_stopped(processor)
-      @plock.synchronize do
-        @workers.delete(processor)
-      end
-    end
-
-    def processor_died(processor, reason)
+    def processor_result(processor, reason = nil)
       @plock.synchronize do
         @workers.delete(processor)
         unless @done
-          p = Processor.new(self)
+          p = Processor.new(@config, &method(:processor_result))
           @workers << p
           p.start
         end
@@ -106,7 +85,7 @@ module Sidekiq
     private
 
     def hard_shutdown
-      # We've reached the timeout and we still have busy workers.
+      # We've reached the timeout and we still have busy threads.
       # They must die but their jobs shall live on.
       cleanup = nil
       @plock.synchronize do
@@ -114,25 +93,42 @@ module Sidekiq
       end
 
       if cleanup.size > 0
-        jobs = cleanup.map {|p| p.job }.compact
+        jobs = cleanup.map { |p| p.job }.compact
 
-        logger.warn { "Terminating #{cleanup.size} busy worker threads" }
-        logger.warn { "Work still in progress #{jobs.inspect}" }
+        logger.warn { "Terminating #{cleanup.size} busy threads" }
+        logger.debug { "Jobs still in progress #{jobs.inspect}" }
 
         # Re-enqueue unfinished jobs
         # NOTE: You may notice that we may push a job back to redis before
-        # the worker thread is terminated. This is ok because Sidekiq's
+        # the thread is terminated. This is ok because Sidekiq's
         # contract says that jobs are run AT LEAST once. Process termination
         # is delayed until we're certain the jobs are back in Redis because
         # it is worse to lose a job than to run it twice.
-        strategy = (@options[:fetch] || Sidekiq::BasicFetch)
-        strategy.bulk_requeue(jobs, @options)
+        capsule.fetcher.bulk_requeue(jobs)
      end
 
      cleanup.each do |processor|
        processor.kill
      end
+
+      # when this method returns, we immediately call `exit` which may not give
+      # the remaining threads time to run `ensure` blocks, etc. We pause here up
+      # to 3 seconds to give threads a minimal amount of time to run `ensure` blocks.
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
+      wait_for(deadline) { @workers.empty? }
    end
 
+    # hack for quicker development / testing environment #2774
+    PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
+
+    # Wait for the orblock to be true or the deadline passed.
+    def wait_for(deadline, &condblock)
+      remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      while remaining > PAUSE_TIME
+        return if condblock.call
+        sleep PAUSE_TIME
+        remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      end
+    end
  end
 end
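
Manager now wraps a single Capsule rather than the global options hash, and the Launcher shown earlier builds one Manager per capsule — that is what lets a 7.x process run several isolated thread pools. A sketch of declaring a second capsule (the name, size and queue are illustrative):

    Sidekiq.configure_server do |config|
      config.concurrency = 10  # the default capsule

      # One extra Manager with a single Processor thread, useful for
      # jobs that must never run concurrently within this process.
      config.capsule("serial") do |cap|
        cap.concurrency = 1
        cap.queues = %w[serial]
      end
    end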