sidekiq 6.4.1 → 7.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sidekiq might be problematic.
- checksums.yaml +4 -4
- data/Changes.md +107 -5
- data/README.md +14 -13
- data/bin/sidekiq +3 -8
- data/bin/sidekiqload +26 -29
- data/lib/sidekiq/api.rb +232 -157
- data/lib/sidekiq/capsule.rb +110 -0
- data/lib/sidekiq/cli.rb +80 -86
- data/lib/sidekiq/client.rb +54 -42
- data/lib/sidekiq/component.rb +66 -0
- data/lib/sidekiq/config.rb +271 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +20 -19
- data/lib/sidekiq/job.rb +375 -10
- data/lib/sidekiq/job_logger.rb +1 -1
- data/lib/sidekiq/job_retry.rb +74 -53
- data/lib/sidekiq/job_util.rb +17 -11
- data/lib/sidekiq/launcher.rb +63 -69
- data/lib/sidekiq/logger.rb +6 -45
- data/lib/sidekiq/manager.rb +33 -32
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +134 -0
- data/lib/sidekiq/middleware/chain.rb +84 -42
- data/lib/sidekiq/middleware/current_attributes.rb +18 -17
- data/lib/sidekiq/middleware/i18n.rb +6 -4
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +1 -1
- data/lib/sidekiq/paginator.rb +10 -2
- data/lib/sidekiq/processor.rb +56 -59
- data/lib/sidekiq/rails.rb +10 -9
- data/lib/sidekiq/redis_client_adapter.rb +118 -0
- data/lib/sidekiq/redis_connection.rb +13 -82
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +65 -37
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +41 -68
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +22 -6
- data/lib/sidekiq/web/csrf_protection.rb +3 -3
- data/lib/sidekiq/web/helpers.rb +21 -19
- data/lib/sidekiq/web.rb +3 -14
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +84 -207
- data/sidekiq.gemspec +29 -5
- data/web/assets/javascripts/application.js +58 -26
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +3 -240
- data/web/assets/javascripts/metrics.js +236 -0
- data/web/assets/stylesheets/application-rtl.css +2 -91
- data/web/assets/stylesheets/application.css +64 -297
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +52 -52
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +82 -69
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +67 -67
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +71 -68
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +63 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +37 -11
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +5 -2
- data/web/views/_nav.erb +1 -1
- data/web/views/_summary.erb +1 -1
- data/web/views/busy.erb +9 -4
- data/web/views/dashboard.erb +36 -4
- data/web/views/metrics.erb +80 -0
- data/web/views/metrics_for_job.erb +69 -0
- data/web/views/queue.erb +5 -1
- metadata +69 -22
- data/lib/sidekiq/delay.rb +0 -43
- data/lib/sidekiq/exception_handler.rb +0 -27
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/util.rb +0 -108
- data/lib/sidekiq/worker.rb +0 -362
- /data/{LICENSE → LICENSE.txt} +0 -0
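
The headline change in 7.0 is visible in this file list: sidekiq/worker.rb and the old delay extensions are gone, while capsule.rb, config.rb, embedded.rb, deploy.rb and the metrics/ files are new. Below is a minimal, hedged sketch of the capsule API those new files introduce, based on the capsule.rb and config.rb additions in this diff; the capsule and queue names are illustrative only:

Sidekiq.configure_server do |config|
  # The implicit "default" capsule still processes the main queues.
  config.queues = %w[critical default low]

  # A capsule is a named group of queues with its own concurrency.
  config.capsule("single-threaded") do |cap|
    cap.concurrency = 1
    cap.queues = %w[serial]
  end
end

Each capsule gets its own Manager (see the launcher.rb diff below), so the "serial" queue in this sketch would be processed by exactly one thread.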
data/lib/sidekiq/launcher.rb
CHANGED
@@ -1,13 +1,14 @@
 # frozen_string_literal: true
 
 require "sidekiq/manager"
-require "sidekiq/fetch"
+require "sidekiq/capsule"
 require "sidekiq/scheduled"
+require "sidekiq/ring_buffer"
 
 module Sidekiq
-  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
+  # The Launcher starts the Capsule Managers, the Poller thread and provides the process heartbeat.
   class Launcher
-    include Util
+    include Sidekiq::Component
 
     STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
@@ -15,50 +16,53 @@ module Sidekiq
       proc { "sidekiq" },
       proc { Sidekiq::VERSION },
       proc { |me, data| data["tag"] },
-      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "[#{Processor::WORK_STATE.size} of #{me.config.total_concurrency} busy]" },
       proc { |me, data| "stopping" if me.stopping? }
     ]
 
-    attr_accessor :manager, :poller, :fetcher
+    attr_accessor :managers, :poller
 
-    def initialize(options)
-      options[:fetch] ||= BasicFetch.new(options)
-      @manager = Sidekiq::Manager.new(options)
-      @poller = Sidekiq::Scheduled::Poller.new
+    def initialize(config, embedded: false)
+      @config = config
+      @embedded = embedded
+      @managers = config.capsules.values.map do |cap|
+        Sidekiq::Manager.new(cap)
+      end
+      @poller = Sidekiq::Scheduled::Poller.new(@config)
       @done = false
-      @options = options
     end
 
     def run
+      Sidekiq.freeze!
       @thread = safe_thread("heartbeat", &method(:start_heartbeat))
       @poller.start
-      @manager.start
+      @managers.each(&:start)
     end
 
     # Stops this instance from processing any more jobs,
     #
     def quiet
+      return if @done
+
       @done = true
-      @manager.quiet
+      @managers.each(&:quiet)
       @poller.terminate
+      fire_event(:quiet, reverse: true)
     end
 
-    # Shuts down the process.  This method does not
-    # return until all work is complete and cleaned up.
-    # It can take up to the timeout to complete.
+    # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
     def stop
-      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
-
-      @done = true
-      @manager.quiet
-      @poller.terminate
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]
 
-      @manager.stop(deadline)
+      quiet
+      stoppers = @managers.map do |mgr|
+        Thread.new do
+          mgr.stop(deadline)
+        end
+      end
 
-      # Requeue everything in case there was a thread which fetched a job while the process was stopped.
-      # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-      strategy = @options[:fetch]
-      strategy.bulk_requeue([], @options)
+      fire_event(:shutdown, reverse: true)
+      stoppers.each(&:join)
 
       clear_heartbeat
     end
@@ -69,24 +73,26 @@ module Sidekiq
 
     private unless $TESTING
 
-    BEAT_PAUSE = 5
+    BEAT_PAUSE = 10
 
     def start_heartbeat
      loop do
        heartbeat
        sleep BEAT_PAUSE
      end
-      Sidekiq.logger.info("Heartbeat stopping...")
+      logger.info("Heartbeat stopping...")
    end
 
    def clear_heartbeat
+      flush_stats
+
      # Remove record from Redis since we are shutting down.
      # Note we don't stop the heartbeat thread; if the process
      # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
+      redis do |conn|
        conn.pipelined do |pipeline|
-          pipeline.srem("processes", identity)
-          pipeline.unlink("#{identity}:workers")
+          pipeline.srem("processes", [identity])
+          pipeline.unlink("#{identity}:work")
        end
      end
    rescue
@@ -94,19 +100,19 @@ module Sidekiq
    end
 
    def heartbeat
-      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
+      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ") unless @embedded
 
      ❤
    end
 
-    def self.flush_stats
+    def flush_stats
      fails = Processor::FAILURE.reset
      procd = Processor::PROCESSED.reset
      return if fails + procd == 0
 
      nowdate = Time.now.utc.strftime("%Y-%m-%d")
      begin
-        Sidekiq.redis do |conn|
+        redis do |conn|
          conn.pipelined do |pipeline|
            pipeline.incrby("stat:processed", procd)
            pipeline.incrby("stat:processed:#{nowdate}", procd)
@@ -118,40 +124,27 @@ module Sidekiq
          end
        end
      rescue => ex
-        # we're exiting the process, things might be shut down so don't
-        # try to handle the exception
-        Sidekiq.logger.warn("Unable to flush stats: #{ex}")
+        logger.warn("Unable to flush stats: #{ex}")
      end
    end
-    at_exit(&method(:flush_stats))
 
    def ❤
      key = identity
      fails = procd = 0
 
      begin
-        fails = Processor::FAILURE.reset
-        procd = Processor::PROCESSED.reset
-        curstate = Processor::WORKER_STATE.dup
-
-        workers_key = "#{key}:workers"
-        nowdate = Time.now.utc.strftime("%Y-%m-%d")
-
-        Sidekiq.redis do |conn|
-          conn.multi do |transaction|
-            transaction.incrby("stat:processed", procd)
-            transaction.incrby("stat:processed:#{nowdate}", procd)
-            transaction.expire("stat:processed:#{nowdate}", STATS_TTL)
-
-            transaction.incrby("stat:failed", fails)
-            transaction.incrby("stat:failed:#{nowdate}", fails)
-            transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
-
-            transaction.unlink(workers_key)
+        flush_stats
+
+        curstate = Processor::WORK_STATE.dup
+        redis do |conn|
+          # work is the current set of executing jobs
+          work_key = "#{key}:work"
+          conn.pipelined do |transaction|
+            transaction.unlink(work_key)
            curstate.each_pair do |tid, hash|
-              transaction.hset(workers_key, tid, Sidekiq.dump_json(hash))
+              transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
            end
-            transaction.expire(workers_key, 60)
+            transaction.expire(work_key, 60)
          end
        end
 
@@ -160,15 +153,15 @@ module Sidekiq
      fails = procd = 0
      kb = memory_usage(::Process.pid)
 
-      _, exists, _, _, msg = Sidekiq.redis { |conn|
+      _, exists, _, _, msg = redis { |conn|
        conn.multi { |transaction|
-          transaction.sadd("processes", key)
-          transaction.exists?(key)
+          transaction.sadd("processes", [key])
+          transaction.exists(key)
          transaction.hmset(key, "info", to_json,
            "busy", curstate.size,
            "beat", Time.now.to_f,
            "rtt_us", rtt,
-            "quiet", @done,
+            "quiet", @done.to_s,
            "rss", kb)
          transaction.expire(key, 60)
          transaction.rpop("#{key}-signals")
@@ -176,7 +169,8 @@ module Sidekiq
      }
 
      # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-      fire_event(:heartbeat) unless exists
+      fire_event(:heartbeat) unless exists > 0
+      fire_event(:beat, oneshot: false)
 
      return unless msg
 
@@ -198,7 +192,7 @@ module Sidekiq
 
    def check_rtt
      a = b = 0
-      Sidekiq.redis do |x|
+      redis do |x|
        a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
        x.ping
        b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
@@ -209,12 +203,12 @@ module Sidekiq
      # Workable is < 10,000µs
      # Log a warning if it's a disaster.
      if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
-        Sidekiq.logger.warn <<~EOM
+        logger.warn <<~EOM
          Your Redis network connection is performing extremely poorly.
          Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
          Ensure Redis is running in the same AZ or datacenter as Sidekiq.
          If these values are close to 100,000, that means your Sidekiq process may be
-          CPU overloaded; see https://github.com/mperham/sidekiq/discussions/5039
+          CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
        EOM
        RTT_READINGS.reset
      end
@@ -246,10 +240,10 @@ module Sidekiq
        "hostname" => hostname,
        "started_at" => Time.now.to_f,
        "pid" => ::Process.pid,
-        "tag" => @options[:tag] || "",
-        "concurrency" => @options[:concurrency],
-        "queues" => @options[:queues].uniq,
-        "labels" => @options[:labels],
+        "tag" => @config[:tag] || "",
+        "concurrency" => @config.total_concurrency,
+        "queues" => @config.capsules.values.map { |cap| cap.queues }.flatten.uniq,
+        "labels" => @config[:labels].to_a,
        "identity" => identity
      }
    end
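
The new embedded: flag above pairs with the new data/lib/sidekiq/embedded.rb. A hedged sketch of how an embedding process might drive the Launcher through that entry point (configure_embed comes from embedded.rb in this release; the queue names and sleep are illustrative):

# Run Sidekiq inside an existing Ruby process rather than via bin/sidekiq.
embed = Sidekiq.configure_embed do |config|
  config.queues = %w[critical default low]
  config.concurrency = 2
end
embed.run   # starts the Launcher with embedded: true, so no proctitle updates
sleep 10    # do other work; jobs process on background threads meanwhile
embed.stop  # quiet, then stop, honoring the shutdown timeout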
data/lib/sidekiq/logger.rb
CHANGED
@@ -18,7 +18,7 @@ module Sidekiq
    end
 
    def self.add(k, v)
-      Thread.current[:sidekiq_context][k] = v
+      current[k] = v
    end
  end
 
@@ -31,28 +31,14 @@ module Sidekiq
      "fatal" => 4
    }
    LEVELS.default_proc = proc do |_, level|
-      Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
+      puts("Invalid log level: #{level.inspect}")
      nil
    end
 
-    def debug?
-      level <= 0
-    end
-
-    def info?
-      level <= 1
-    end
-
-    def warn?
-      level <= 2
-    end
-
-    def error?
-      level <= 3
-    end
-
-    def fatal?
-      level <= 4
+    LEVELS.each do |level, numeric_level|
+      define_method("#{level}?") do
+        local_level.nil? ? super() : local_level <= numeric_level
+      end
    end
 
    def local_level
@@ -84,36 +70,11 @@ module Sidekiq
    ensure
      self.local_level = old_local_level
    end
-
-    # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
-    # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
-    def add(severity, message = nil, progname = nil, &block)
-      severity ||= ::Logger::UNKNOWN
-      progname ||= @progname
-
-      return true if @logdev.nil? || severity < level
-
-      if message.nil?
-        if block
-          message = yield
-        else
-          message = progname
-          progname = @progname
-        end
-      end
-
-      @logdev.write format_message(format_severity(severity), Time.now, progname, message)
-    end
  end
 
  class Logger < ::Logger
    include LoggingUtils
 
-    def initialize(*args, **kwargs)
-      super
-      self.formatter = Sidekiq.log_formatter
-    end
-
    module Formatters
      class Base < ::Logger::Formatter
        def tid
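
The five hand-written level predicates collapse into one define_method loop that also respects the thread-local level. A self-contained sketch of the same pattern outside Sidekiq (DemoLogger and the thread-local key are made up for illustration):

require "logger"

class DemoLogger < Logger
  LEVELS = {"debug" => 0, "info" => 1, "warn" => 2, "error" => 3, "fatal" => 4}

  # Thread-local override, standing in for Sidekiq's local_level
  def local_level
    Thread.current[:demo_log_level]
  end

  LEVELS.each do |name, numeric|
    define_method("#{name}?") do
      # Fall back to ::Logger's predicate when no thread-local level is set.
      # Note: super needs explicit parens inside a define_method block.
      local_level.nil? ? super() : local_level <= numeric
    end
  end
end

log = DemoLogger.new($stdout, level: Logger::INFO)
log.debug?                           # => false, from ::Logger
Thread.current[:demo_log_level] = 0
log.debug?                           # => true, the thread-local override wins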
data/lib/sidekiq/manager.rb
CHANGED
@@ -1,8 +1,6 @@
 # frozen_string_literal: true
 
-require "sidekiq/util"
 require "sidekiq/processor"
-require "sidekiq/fetch"
 require "set"
 
 module Sidekiq
@@ -21,43 +19,38 @@ module Sidekiq
  # the shutdown process. The other tasks are performed by other threads.
  #
  class Manager
-    include Util
+    include Sidekiq::Component
 
    attr_reader :workers
-    attr_reader :options
+    attr_reader :capsule
 
-    def initialize(options = {})
-      logger.debug { options.inspect }
-      @options = options
-      @count = options[:concurrency] || 10
+    def initialize(capsule)
+      @config = @capsule = capsule
+      @count = capsule.concurrency
      raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1
 
      @done = false
      @workers = Set.new
+      @plock = Mutex.new
      @count.times do
-        @workers << Processor.new(self, options)
+        @workers << Processor.new(@config, &method(:processor_result))
      end
-      @plock = Mutex.new
    end
 
    def start
-      @workers.each do |x|
-        x.start
-      end
+      @workers.each(&:start)
    end
 
    def quiet
      return if @done
      @done = true
 
-      logger.info { "Terminating quiet threads" }
-      @workers.each { |x| x.terminate }
-      fire_event(:quiet, reverse: true)
+      logger.info { "Terminating quiet threads for #{capsule.name} capsule" }
+      @workers.each(&:terminate)
    end
 
    def stop(deadline)
      quiet
-      fire_event(:shutdown, reverse: true)
 
      # some of the shutdown events can be async,
      # we don't have any way to know when they're done but
@@ -65,24 +58,20 @@ module Sidekiq
      sleep PAUSE_TIME
      return if @workers.empty?
 
-      logger.info { "Pausing to allow workers to finish..." }
+      logger.info { "Pausing to allow jobs to finish..." }
      wait_for(deadline) { @workers.empty? }
      return if @workers.empty?
 
      hard_shutdown
+    ensure
+      capsule.stop
    end
 
-    def processor_stopped(processor)
-      @plock.synchronize do
-        @workers.delete(processor)
-      end
-    end
-
-    def processor_died(processor, reason)
+    def processor_result(processor, reason = nil)
      @plock.synchronize do
        @workers.delete(processor)
        unless @done
-          p = Processor.new(self, @options)
+          p = Processor.new(@config, &method(:processor_result))
          @workers << p
          p.start
        end
@@ -96,7 +85,7 @@ module Sidekiq
    private
 
    def hard_shutdown
-      # We've reached the timeout and we still have busy workers.
+      # We've reached the timeout and we still have busy threads.
      # They must die but their jobs shall live on.
      cleanup = nil
      @plock.synchronize do
@@ -106,17 +95,16 @@ module Sidekiq
      if cleanup.size > 0
        jobs = cleanup.map { |p| p.job }.compact
 
-        logger.warn { "Terminating #{cleanup.size} busy worker threads" }
-        logger.warn { "Work still in progress #{jobs.inspect}" }
+        logger.warn { "Terminating #{cleanup.size} busy threads" }
+        logger.debug { "Jobs still in progress #{jobs.inspect}" }
 
        # Re-enqueue unfinished jobs
        # NOTE: You may notice that we may push a job back to redis before
-        # the worker thread is terminated. This is ok because Sidekiq's
+        # the thread is terminated. This is ok because Sidekiq's
        # contract says that jobs are run AT LEAST once. Process termination
        # is delayed until we're certain the jobs are back in Redis because
        # it is worse to lose a job than to run it twice.
-        strategy = @options[:fetch]
-        strategy.bulk_requeue(jobs, @options)
+        capsule.fetcher.bulk_requeue(jobs)
      end
 
      cleanup.each do |processor|
@@ -129,5 +117,18 @@ module Sidekiq
      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
      wait_for(deadline) { @workers.empty? }
    end
+
+    # hack for quicker development / testing environment #2774
+    PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
+
+    # Wait for the orblock to be true or the deadline passed.
+    def wait_for(deadline, &condblock)
+      remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      while remaining > PAUSE_TIME
+        return if condblock.call
+        sleep PAUSE_TIME
+        remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+      end
+    end
  end
 end
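
wait_for used to live in the deleted sidekiq/util.rb; 7.0 inlines it into Manager, as the last hunk shows. A standalone sketch of the same deadline-polling loop, with a toy worker set standing in for @workers:

PAUSE_TIME = 0.5

# Poll the block every PAUSE_TIME seconds until it returns true
# or the monotonic deadline passes.
def wait_for(deadline)
  remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
  while remaining > PAUSE_TIME
    return if yield
    sleep PAUSE_TIME
    remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
  end
end

workers = 3.times.map { Thread.new { sleep rand } }
deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 5
wait_for(deadline) { workers.none?(&:alive?) }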
data/lib/sidekiq/metrics/query.rb
ADDED
@@ -0,0 +1,153 @@
+require "sidekiq"
+require "date"
+require "set"
+
+require "sidekiq/metrics/shared"
+
+module Sidekiq
+  module Metrics
+    # Allows caller to query for Sidekiq execution metrics within Redis.
+    # Caller sets a set of attributes to act as filters. {#fetch} will call
+    # Redis and return a Hash of results.
+    #
+    # NB: all metrics and times/dates are UTC only. We specifically do not
+    # support timezones.
+    class Query
+      def initialize(pool: nil, now: Time.now)
+        @time = now.utc
+        @pool = pool || Sidekiq.default_configuration.redis_pool
+        @klass = nil
+      end
+
+      # Get metric data for all jobs from the last hour
+      def top_jobs(minutes: 60)
+        result = Result.new
+
+        time = @time
+        redis_results = @pool.with do |conn|
+          conn.pipelined do |pipe|
+            minutes.times do |idx|
+              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+              pipe.hgetall key
+              result.prepend_bucket time
+              time -= 60
+            end
+          end
+        end
+
+        time = @time
+        redis_results.each do |hash|
+          hash.each do |k, v|
+            kls, metric = k.split("|")
+            result.job_results[kls].add_metric metric, time, v.to_i
+          end
+          time -= 60
+        end
+
+        result.marks = fetch_marks(result.starts_at..result.ends_at)
+
+        result
+      end
+
+      def for_job(klass, minutes: 60)
+        result = Result.new
+
+        time = @time
+        redis_results = @pool.with do |conn|
+          conn.pipelined do |pipe|
+            minutes.times do |idx|
+              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+              pipe.hmget key, "#{klass}|ms", "#{klass}|p", "#{klass}|f"
+              result.prepend_bucket time
+              time -= 60
+            end
+          end
+        end
+
+        time = @time
+        @pool.with do |conn|
+          redis_results.each do |(ms, p, f)|
+            result.job_results[klass].add_metric "ms", time, ms.to_i if ms
+            result.job_results[klass].add_metric "p", time, p.to_i if p
+            result.job_results[klass].add_metric "f", time, f.to_i if f
+            result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time)
+            time -= 60
+          end
+        end
+
+        result.marks = fetch_marks(result.starts_at..result.ends_at)
+
+        result
+      end
+
+      class Result < Struct.new(:starts_at, :ends_at, :size, :buckets, :job_results, :marks)
+        def initialize
+          super
+          self.buckets = []
+          self.marks = []
+          self.job_results = Hash.new { |h, k| h[k] = JobResult.new }
+        end
+
+        def prepend_bucket(time)
+          buckets.unshift time.strftime("%H:%M")
+          self.ends_at ||= time
+          self.starts_at = time
+        end
+      end
+
+      class JobResult < Struct.new(:series, :hist, :totals)
+        def initialize
+          super
+          self.series = Hash.new { |h, k| h[k] = Hash.new(0) }
+          self.hist = Hash.new { |h, k| h[k] = [] }
+          self.totals = Hash.new(0)
+        end
+
+        def add_metric(metric, time, value)
+          totals[metric] += value
+          series[metric][time.strftime("%H:%M")] += value
+
+          # Include timing measurements in seconds for convenience
+          add_metric("s", time, value / 1000.0) if metric == "ms"
+        end
+
+        def add_hist(time, hist_result)
+          hist[time.strftime("%H:%M")] = hist_result
+        end
+
+        def total_avg(metric = "ms")
+          completed = totals["p"] - totals["f"]
+          totals[metric].to_f / completed
+        end
+
+        def series_avg(metric = "ms")
+          series[metric].each_with_object(Hash.new(0)) do |(bucket, value), result|
+            completed = series.dig("p", bucket) - series.dig("f", bucket)
+            result[bucket] = completed == 0 ? 0 : value.to_f / completed
+          end
+        end
+      end
+
+      class MarkResult < Struct.new(:time, :label)
+        def bucket
+          time.strftime("%H:%M")
+        end
+      end
+
+      private
+
+      def fetch_marks(time_range)
+        [].tap do |result|
+          marks = @pool.with { |c| c.hgetall("#{@time.strftime("%Y%m%d")}-marks") }
+
+          marks.each do |timestamp, label|
+            time = Time.parse(timestamp)
+            if time_range.cover? time
+              result << MarkResult.new(time, label)
+            end
+          end
+        end
+      end
+    end
+  end
+end
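
A hedged usage sketch of the Query class added above ("HardJob" is a placeholder job class; the accessors mirror the Result/JobResult structs in this file, and the marks hash read by fetch_marks appears to be written by the new deploy.rb):

require "sidekiq/metrics/query"

q = Sidekiq::Metrics::Query.new
overview = q.top_jobs(minutes: 30)
overview.job_results.each do |klass, jr|
  # totals uses the same short keys as the Redis hash: p/f/ms/s
  puts "#{klass}: #{jr.totals["p"]} processed, #{jr.totals["f"]} failed, avg #{jr.total_avg("ms")}ms"
end

detail = q.for_job("HardJob")
detail.marks.each { |m| puts "deploy '#{m.label}' at #{m.bucket}" }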