sidekiq 5.2.6 → 7.1.0
- checksums.yaml +4 -4
- data/Changes.md +537 -8
- data/LICENSE.txt +9 -0
- data/README.md +47 -50
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +213 -115
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +556 -351
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +203 -226
- data/lib/sidekiq/client.rb +121 -101
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +274 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/job.rb +374 -0
- data/lib/sidekiq/job_logger.rb +33 -7
- data/lib/sidekiq/job_retry.rb +131 -108
- data/lib/sidekiq/job_util.rb +105 -0
- data/lib/sidekiq/launcher.rb +203 -105
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +43 -46
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +136 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +56 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +146 -0
- data/lib/sidekiq/paginator.rb +28 -16
- data/lib/sidekiq/processor.rb +108 -107
- data/lib/sidekiq/rails.rb +49 -38
- data/lib/sidekiq/redis_client_adapter.rb +96 -0
- data/lib/sidekiq/redis_connection.rb +38 -107
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +111 -49
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +66 -84
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +3 -1
- data/lib/sidekiq/web/action.rb +15 -11
- data/lib/sidekiq/web/application.rb +123 -79
- data/lib/sidekiq/web/csrf_protection.rb +180 -0
- data/lib/sidekiq/web/helpers.rb +137 -106
- data/lib/sidekiq/web/router.rb +23 -19
- data/lib/sidekiq/web.rb +56 -107
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +92 -182
- data/sidekiq.gemspec +25 -16
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +130 -61
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +36 -292
- data/web/assets/javascripts/metrics.js +264 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +2 -95
- data/web/assets/stylesheets/application.css +102 -522
- data/web/locales/ar.yml +71 -65
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -53
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +84 -66
- data/web/locales/es.yml +70 -54
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +63 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +68 -63
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +6 -3
- data/web/views/_job_info.erb +21 -4
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +1 -1
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +75 -25
- data/web/views/dashboard.erb +58 -18
- data/web/views/dead.erb +3 -3
- data/web/views/layout.erb +3 -1
- data/web/views/metrics.erb +82 -0
- data/web/views/metrics_for_job.erb +68 -0
- data/web/views/morgue.erb +14 -15
- data/web/views/queue.erb +33 -24
- data/web/views/queues.erb +13 -3
- data/web/views/retries.erb +16 -17
- data/web/views/retry.erb +3 -3
- data/web/views/scheduled.erb +17 -15
- metadata +69 -69
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -15
- data/.travis.yml +0 -11
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/COMM-LICENSE +0 -97
- data/Ent-Changes.md +0 -238
- data/Gemfile +0 -23
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -759
- data/Rakefile +0 -9
- data/bin/sidekiqctl +0 -20
- data/code_of_conduct.md +0 -50
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/ctl.rb +0 -221
- data/lib/sidekiq/delay.rb +0 -42
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -220
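
The generator templates are renamed from worker to job (worker.rb.erb → job.rb.erb), lib/sidekiq/worker.rb is replaced by lib/sidekiq/job.rb, and the new worker_compatibility_alias.rb keeps the old constant working: Sidekiq::Worker is now an alias for Sidekiq::Job. As a rough sketch of what an upgraded job class looks like (class, queue, and argument names here are illustrative, not from the diff):

# A minimal Sidekiq 7 job class. Sidekiq::Job is the primary name;
# Sidekiq::Worker still works via the compatibility alias.
class HardJob
  include Sidekiq::Job
  sidekiq_options queue: "default", retry: 5

  def perform(user_id)
    # do the work
  end
end

HardJob.perform_async(1234)
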
data/lib/sidekiq/launcher.rb
CHANGED
@@ -1,57 +1,71 @@
 # frozen_string_literal: true
-
-require
-require
+
+require "sidekiq/manager"
+require "sidekiq/capsule"
+require "sidekiq/scheduled"
+require "sidekiq/ring_buffer"
 
 module Sidekiq
-  # The Launcher
-  # start, monitor and stop the core Actors in Sidekiq.
-  # If any of these actors die, the Sidekiq process exits
-  # immediately.
+  # The Launcher starts the Capsule Managers, the Poller thread and provides the process heartbeat.
   class Launcher
-    include
+    include Sidekiq::Component
+
+    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
-
+    PROCTITLES = [
+      proc { "sidekiq" },
+      proc { Sidekiq::VERSION },
+      proc { |me, data| data["tag"] },
+      proc { |me, data| "[#{Processor::WORK_STATE.size} of #{me.config.total_concurrency} busy]" },
+      proc { |me, data| "stopping" if me.stopping? }
+    ]
 
-
+    attr_accessor :managers, :poller
 
-    def initialize(
-      @
-      @
+    def initialize(config, embedded: false)
+      @config = config
+      @embedded = embedded
+      @managers = config.capsules.values.map do |cap|
+        Sidekiq::Manager.new(cap)
+      end
+      @poller = Sidekiq::Scheduled::Poller.new(@config)
       @done = false
-      @options = options
     end
 
-
-
+    # Start this Sidekiq instance. If an embedding process already
+    # has a heartbeat thread, caller can use `async_beat: false`
+    # and instead have thread call Launcher#heartbeat every N seconds.
+    def run(async_beat: true)
+      Sidekiq.freeze!
+      logger.debug { @config.merge!({}) }
+      @thread = safe_thread("heartbeat", &method(:start_heartbeat)) if async_beat
       @poller.start
-      @
+      @managers.each(&:start)
     end
 
     # Stops this instance from processing any more jobs,
-    #
     def quiet
+      return if @done
+
       @done = true
-      @
+      @managers.each(&:quiet)
       @poller.terminate
+      fire_event(:quiet, reverse: true)
     end
 
-    # Shuts down
-    # return until all work is complete and cleaned up.
-    # It can take up to the timeout to complete.
+    # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
     def stop
-      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]
 
-
-      @
-
-
-
+      quiet
+      stoppers = @managers.map do |mgr|
+        Thread.new do
+          mgr.stop(deadline)
+        end
+      end
 
-
-
-      strategy = (@options[:fetch] || Sidekiq::BasicFetch)
-      strategy.bulk_requeue([], @options)
+      fire_event(:shutdown, reverse: true)
+      stoppers.each(&:join)
 
       clear_heartbeat
     end
@@ -60,114 +74,198 @@ module Sidekiq
       @done
     end
 
+    # If embedding Sidekiq, you can have the process heartbeat
+    # call this method to regularly heartbeat rather than creating
+    # a separate thread.
+    def heartbeat
+      ❤
+    end
+
     private unless $TESTING
 
-
-
-
-
+    BEAT_PAUSE = 10
+
+    def start_heartbeat
+      loop do
+        beat
+        sleep BEAT_PAUSE
+      end
+      logger.info("Heartbeat stopping...")
+    end
 
+    def beat
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ") unless @embedded
       ❤
     end
 
+    def clear_heartbeat
+      flush_stats
+
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
+      redis do |conn|
+        conn.pipelined do |pipeline|
+          pipeline.srem("processes", [identity])
+          pipeline.unlink("#{identity}:work")
+        end
+      end
+    rescue
+      # best effort, ignore network errors
+    end
+
+    def flush_stats
+      fails = Processor::FAILURE.reset
+      procd = Processor::PROCESSED.reset
+      return if fails + procd == 0
+
+      nowdate = Time.now.utc.strftime("%Y-%m-%d")
+      begin
+        redis do |conn|
+          conn.pipelined do |pipeline|
+            pipeline.incrby("stat:processed", procd)
+            pipeline.incrby("stat:processed:#{nowdate}", procd)
+            pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+            pipeline.incrby("stat:failed", fails)
+            pipeline.incrby("stat:failed:#{nowdate}", fails)
+            pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
+          end
+        end
+      rescue => ex
+        logger.warn("Unable to flush stats: #{ex}")
+      end
+    end
+
     def ❤
       key = identity
       fails = procd = 0
+
       begin
-
-
-        curstate = Processor::
-
-
-
-
-
-        conn.incrby("stat:processed", procd)
-        conn.incrby("stat:processed:#{nowdate}", procd)
-        conn.expire("stat:processed:#{nowdate}", STATS_TTL)
-
-        conn.incrby("stat:failed", fails)
-        conn.incrby("stat:failed:#{nowdate}", fails)
-        conn.expire("stat:failed:#{nowdate}", STATS_TTL)
-
-        conn.del(workers_key)
+        flush_stats
+
+        curstate = Processor::WORK_STATE.dup
+        redis do |conn|
+          # work is the current set of executing jobs
+          work_key = "#{key}:work"
+          conn.pipelined do |transaction|
+            transaction.unlink(work_key)
             curstate.each_pair do |tid, hash|
-
+              transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
             end
-
+            transaction.expire(work_key, 60)
          end
        end
+
+        rtt = check_rtt
+
        fails = procd = 0
+        kb = memory_usage(::Process.pid)
 
-        _, exists, _, _,
-        conn.multi
-
-
-
-
-
-
-
+        _, exists, _, _, signal = redis { |conn|
+          conn.multi { |transaction|
+            transaction.sadd("processes", [key])
+            transaction.exists(key)
+            transaction.hset(key, "info", to_json,
+              "busy", curstate.size,
+              "beat", Time.now.to_f,
+              "rtt_us", rtt,
+              "quiet", @done.to_s,
+              "rss", kb)
+            transaction.expire(key, 60)
+            transaction.rpop("#{key}-signals")
+          }
+        }
 
        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-        fire_event(:heartbeat)
-
-        return unless msg
+        fire_event(:heartbeat) unless exists > 0
+        fire_event(:beat, oneshot: false)
 
-        ::Process.kill(
+        ::Process.kill(signal, ::Process.pid) if signal && !@embedded
      rescue => e
        # ignore all redis/network issues
-        logger.error("heartbeat: #{e
+        logger.error("heartbeat: #{e}")
        # don't lose the counts if there was a network issue
        Processor::PROCESSED.incr(procd)
        Processor::FAILURE.incr(fails)
      end
    end
 
-
-
-
-
+    # We run the heartbeat every five seconds.
+    # Capture five samples of RTT, log a warning if each sample
+    # is above our warning threshold.
+    RTT_READINGS = RingBuffer.new(5)
+    RTT_WARNING_LEVEL = 50_000
+
+    def check_rtt
+      a = b = 0
+      redis do |x|
+        a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
+        x.ping
+        b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
      end
-
+      rtt = b - a
+      RTT_READINGS << rtt
+      # Ideal RTT for Redis is < 1000µs
+      # Workable is < 10,000µs
+      # Log a warning if it's a disaster.
+      if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
+        logger.warn <<~EOM
+          Your Redis network connection is performing extremely poorly.
+          Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
+          Ensure Redis is running in the same AZ or datacenter as Sidekiq.
+          If these values are close to 100,000, that means your Sidekiq process may be
+          CPU-saturated; reduce your concurrency and/or see https://github.com/sidekiq/sidekiq/discussions/5039
+        EOM
+        RTT_READINGS.reset
+      end
+      rtt
    end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+    MEMORY_GRABBER = case RUBY_PLATFORM
+    when /linux/
+      ->(pid) {
+        IO.readlines("/proc/#{$$}/status").each do |line|
+          next unless line.start_with?("VmRSS:")
+          break line.split[1].to_i
+        end
+      }
+    when /darwin|bsd/
+      ->(pid) {
+        `ps -o pid,rss -p #{pid}`.lines.last.split.last.to_i
+      }
+    else
+      ->(pid) { 0 }
    end
 
-    def
-
-      # this data changes infrequently so dump it to a string
-      # now so we don't need to dump it every heartbeat.
-      Sidekiq.dump_json(to_data)
-    end
+    def memory_usage(pid)
+      MEMORY_GRABBER.call(pid)
    end
 
-    def
-
-
-
-
-
-
-
-
-
-
-
+    def to_data
+      @data ||= {
+        "hostname" => hostname,
+        "started_at" => Time.now.to_f,
+        "pid" => ::Process.pid,
+        "tag" => @config[:tag] || "",
+        "concurrency" => @config.total_concurrency,
+        "queues" => @config.capsules.values.flat_map { |cap| cap.queues }.uniq,
+        "weights" => to_weights,
+        "labels" => @config[:labels].to_a,
+        "identity" => identity,
+        "version" => Sidekiq::VERSION,
+        "embedded" => @embedded
+      }
    end
 
+    def to_weights
+      @config.capsules.values.map(&:weights)
+    end
+
+    def to_json
+      # this data changes infrequently so dump it to a string
+      # now so we don't need to dump it every heartbeat.
+      @json ||= Sidekiq.dump_json(to_data)
+    end
  end
end
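
Launcher#initialize now takes the frozen Sidekiq::Config plus an embedded: flag, and run(async_beat: false) lets a host process drive the heartbeat itself. In practice you rarely construct a Launcher directly; the supported entry point for embedding is Sidekiq.configure_embed. A minimal sketch (queue names and concurrency values are illustrative):

# Run Sidekiq inside another long-lived Ruby process, e.g. a Puma plugin or a script.
embedded = Sidekiq.configure_embed do |config|
  config.queues = %w[critical default]
  config.concurrency = 2 # keep this small; it shares the host's resources
end
embedded.run   # boots the Launcher: capsule managers, poller, heartbeat thread
# ... host process does its work ...
embedded.stop  # quiets processors and shuts down, honoring the shutdown timeout
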
data/lib/sidekiq/logger.rb
ADDED
@@ -0,0 +1,131 @@
+# frozen_string_literal: true
+
+require "logger"
+require "time"
+
+module Sidekiq
+  module Context
+    def self.with(hash)
+      orig_context = current.dup
+      current.merge!(hash)
+      yield
+    ensure
+      Thread.current[:sidekiq_context] = orig_context
+    end
+
+    def self.current
+      Thread.current[:sidekiq_context] ||= {}
+    end
+
+    def self.add(k, v)
+      current[k] = v
+    end
+  end
+
+  module LoggingUtils
+    LEVELS = {
+      "debug" => 0,
+      "info" => 1,
+      "warn" => 2,
+      "error" => 3,
+      "fatal" => 4
+    }
+    LEVELS.default_proc = proc do |_, level|
+      puts("Invalid log level: #{level.inspect}")
+      nil
+    end
+
+    LEVELS.each do |level, numeric_level|
+      define_method("#{level}?") do
+        local_level.nil? ? super() : local_level <= numeric_level
+      end
+    end
+
+    def local_level
+      Thread.current[:sidekiq_log_level]
+    end
+
+    def local_level=(level)
+      case level
+      when Integer
+        Thread.current[:sidekiq_log_level] = level
+      when Symbol, String
+        Thread.current[:sidekiq_log_level] = LEVELS[level.to_s]
+      when nil
+        Thread.current[:sidekiq_log_level] = nil
+      else
+        raise ArgumentError, "Invalid log level: #{level.inspect}"
+      end
+    end
+
+    def level
+      local_level || super
+    end
+
+    # Change the thread-local level for the duration of the given block.
+    def log_at(level)
+      old_local_level = local_level
+      self.local_level = level
+      yield
+    ensure
+      self.local_level = old_local_level
+    end
+  end
+
+  class Logger < ::Logger
+    include LoggingUtils
+
+    module Formatters
+      class Base < ::Logger::Formatter
+        def tid
+          Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+        end
+
+        def ctx
+          Sidekiq::Context.current
+        end
+
+        def format_context
+          if ctx.any?
+            " " + ctx.compact.map { |k, v|
+              case v
+              when Array
+                "#{k}=#{v.join(",")}"
+              else
+                "#{k}=#{v}"
+              end
+            }.join(" ")
+          end
+        end
+      end
+
+      class Pretty < Base
+        def call(severity, time, program_name, message)
+          "#{time.utc.iso8601(3)} pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class WithoutTimestamp < Pretty
+        def call(severity, time, program_name, message)
+          "pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
+        end
+      end
+
+      class JSON < Base
+        def call(severity, time, program_name, message)
+          hash = {
+            ts: time.utc.iso8601(3),
+            pid: ::Process.pid,
+            tid: tid,
+            lvl: severity,
+            msg: message
+          }
+          c = ctx
+          hash["ctx"] = c unless c.empty?
+
+          Sidekiq.dump_json(hash) << "\n"
+        end
+      end
+    end
+  end
+end
data/lib/sidekiq/manager.rb
CHANGED
@@ -1,12 +1,9 @@
 # frozen_string_literal: true
-require 'sidekiq/util'
-require 'sidekiq/processor'
-require 'sidekiq/fetch'
-require 'thread'
-require 'set'
 
-
+require "sidekiq/processor"
+require "set"
 
+module Sidekiq
  ##
  # The Manager is the central coordination point in Sidekiq, controlling
  # the lifecycle of the Processors.
@@ -22,46 +19,38 @@ module Sidekiq
  # the shutdown process. The other tasks are performed by other threads.
  #
  class Manager
-    include
+    include Sidekiq::Component
 
    attr_reader :workers
-    attr_reader :
+    attr_reader :capsule
 
-    def initialize(
-
-      @
-      @count = options[:concurrency] || 10
+    def initialize(capsule)
+      @config = @capsule = capsule
+      @count = capsule.concurrency
      raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1
 
      @done = false
      @workers = Set.new
+      @plock = Mutex.new
      @count.times do
-        @workers << Processor.new(
+        @workers << Processor.new(@config, &method(:processor_result))
      end
-      @plock = Mutex.new
    end
 
    def start
-      @workers.each
-        x.start
-      end
+      @workers.each(&:start)
    end
 
    def quiet
      return if @done
      @done = true
 
-      logger.info { "Terminating quiet
-      @workers.each
-      fire_event(:quiet, reverse: true)
+      logger.info { "Terminating quiet threads for #{capsule.name} capsule" }
+      @workers.each(&:terminate)
    end
 
-    # hack for quicker development / testing environment #2774
-    PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5
-
    def stop(deadline)
      quiet
-      fire_event(:shutdown, reverse: true)
 
      # some of the shutdown events can be async,
      # we don't have any way to know when they're done but
@@ -69,29 +58,20 @@ module Sidekiq
      sleep PAUSE_TIME
      return if @workers.empty?
 
-      logger.info { "Pausing to allow
-
-      while remaining > PAUSE_TIME
-        return if @workers.empty?
-        sleep PAUSE_TIME
-        remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-      end
+      logger.info { "Pausing to allow jobs to finish..." }
+      wait_for(deadline) { @workers.empty? }
      return if @workers.empty?
 
      hard_shutdown
+    ensure
+      capsule.stop
    end
 
-    def
-      @plock.synchronize do
-        @workers.delete(processor)
-      end
-    end
-
-    def processor_died(processor, reason)
+    def processor_result(processor, reason = nil)
      @plock.synchronize do
        @workers.delete(processor)
        unless @done
-          p = Processor.new(
+          p = Processor.new(@config, &method(:processor_result))
          @workers << p
          p.start
        end
@@ -105,7 +85,7 @@ module Sidekiq
    private
 
    def hard_shutdown
-      # We've reached the timeout and we still have busy
+      # We've reached the timeout and we still have busy threads.
      # They must die but their jobs shall live on.
      cleanup = nil
      @plock.synchronize do
@@ -113,25 +93,42 @@ module Sidekiq
      end
 
      if cleanup.size > 0
-        jobs = cleanup.map {|p| p.job }.compact
+        jobs = cleanup.map { |p| p.job }.compact
 
-        logger.warn { "Terminating #{cleanup.size} busy
-        logger.
+        logger.warn { "Terminating #{cleanup.size} busy threads" }
+        logger.debug { "Jobs still in progress #{jobs.inspect}" }
 
        # Re-enqueue unfinished jobs
        # NOTE: You may notice that we may push a job back to redis before
-        # the
+        # the thread is terminated. This is ok because Sidekiq's
        # contract says that jobs are run AT LEAST once. Process termination
        # is delayed until we're certain the jobs are back in Redis because
        # it is worse to lose a job than to run it twice.
-
-        strategy.bulk_requeue(jobs, @options)
+        capsule.fetcher.bulk_requeue(jobs)
      end
 
      cleanup.each do |processor|
        processor.kill
      end
+
+      # when this method returns, we immediately call `exit` which may not give
+      # the remaining threads time to run `ensure` blocks, etc. We pause here up
+      # to 3 seconds to give threads a minimal amount of time to run `ensure` blocks.
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
+      wait_for(deadline) { @workers.empty? }
    end
 
+    # hack for quicker development / testing environment #2774
+    PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
+
+    # Wait for the orblock to be true or the deadline passed.
+    def wait_for(deadline, &condblock)
+      remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      while remaining > PAUSE_TIME
+        return if condblock.call
+        sleep PAUSE_TIME
+        remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      end
+    end
  end
end
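
Manager now wraps a single Capsule (its queues, concurrency, and fetcher) instead of a global options hash, so one process can run several independent managers side by side. Capsules are declared in the server config; a sketch (the capsule name, queues, and concurrency values are illustrative):

Sidekiq.configure_server do |config|
  # The default capsule still covers the standard queues.
  config.queues = %w[default]
  config.concurrency = 10

  # An extra capsule gets its own Manager and Processor pool; concurrency 1
  # means jobs on this queue never run in parallel within this process.
  config.capsule("serial") do |cap|
    cap.concurrency = 1
    cap.queues = %w[serial]
  end
end
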