sidekiq 6.5.12 → 7.0.6
Potentially problematic release.
- checksums.yaml +4 -4
- data/Changes.md +63 -22
- data/README.md +40 -32
- data/bin/sidekiq +3 -8
- data/bin/sidekiqload +186 -118
- data/bin/sidekiqmon +3 -0
- data/lib/sidekiq/api.rb +84 -121
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +55 -74
- data/lib/sidekiq/client.rb +29 -16
- data/lib/sidekiq/component.rb +3 -0
- data/lib/sidekiq/config.rb +270 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +11 -14
- data/lib/sidekiq/job.rb +375 -10
- data/lib/sidekiq/job_logger.rb +2 -2
- data/lib/sidekiq/job_retry.rb +9 -9
- data/lib/sidekiq/job_util.rb +48 -14
- data/lib/sidekiq/launcher.rb +64 -61
- data/lib/sidekiq/logger.rb +1 -26
- data/lib/sidekiq/manager.rb +9 -11
- data/lib/sidekiq/metrics/query.rb +2 -2
- data/lib/sidekiq/metrics/shared.rb +4 -3
- data/lib/sidekiq/metrics/tracking.rb +20 -18
- data/lib/sidekiq/middleware/chain.rb +19 -18
- data/lib/sidekiq/middleware/current_attributes.rb +8 -15
- data/lib/sidekiq/monitor.rb +16 -3
- data/lib/sidekiq/processor.rb +21 -27
- data/lib/sidekiq/rails.rb +13 -17
- data/lib/sidekiq/redis_client_adapter.rb +8 -47
- data/lib/sidekiq/redis_connection.rb +11 -111
- data/lib/sidekiq/scheduled.rb +20 -21
- data/lib/sidekiq/testing.rb +5 -33
- data/lib/sidekiq/transaction_aware_client.rb +4 -5
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/application.rb +21 -6
- data/lib/sidekiq/web/csrf_protection.rb +1 -1
- data/lib/sidekiq/web/helpers.rb +16 -15
- data/lib/sidekiq/web.rb +6 -17
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +76 -274
- data/sidekiq.gemspec +20 -10
- data/web/assets/javascripts/application.js +18 -1
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +3 -223
- data/web/assets/javascripts/metrics.js +117 -115
- data/web/assets/stylesheets/application-dark.css +4 -0
- data/web/assets/stylesheets/application-rtl.css +2 -91
- data/web/assets/stylesheets/application.css +23 -298
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +2 -7
- data/web/locales/en.yml +76 -70
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +67 -67
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +64 -68
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +59 -69
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +20 -18
- data/web/locales/zh-tw.yml +10 -1
- data/web/views/_footer.erb +5 -2
- data/web/views/_job_info.erb +18 -2
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +1 -1
- data/web/views/busy.erb +37 -26
- data/web/views/dashboard.erb +36 -5
- data/web/views/metrics.erb +33 -20
- data/web/views/metrics_for_job.erb +22 -38
- data/web/views/morgue.erb +5 -9
- data/web/views/queue.erb +10 -14
- data/web/views/queues.erb +3 -1
- data/web/views/retries.erb +5 -9
- data/web/views/scheduled.erb +12 -13
- metadata +50 -40
- data/lib/sidekiq/delay.rb +0 -43
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/metrics/deploy.rb +0 -47
- data/lib/sidekiq/worker.rb +0 -370
- data/web/assets/javascripts/graph.js +0 -16
- /data/{LICENSE → LICENSE.txt} +0 -0
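
The new data/lib/sidekiq/capsule.rb, config.rb and embedded.rb files listed above carry the headline 7.0 change: configuration moves from a global options hash to a Sidekiq::Config holding one or more Capsules, each with its own queues, concurrency and middleware copies. A minimal sketch of declaring an extra capsule with the 7.x configuration API (the capsule and queue names are only examples):

    # config/initializers/sidekiq.rb
    Sidekiq.configure_server do |config|
      # the implicit "default" capsule keeps using the top-level settings
      config.queues = %w[critical default low]
      config.concurrency = 10

      # an extra capsule with a single-threaded pool for its own queue
      config.capsule("serial") do |cap|
        cap.concurrency = 1
        cap.queues = %w[serial]
      end
    end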
data/lib/sidekiq/launcher.rb
CHANGED
@@ -1,12 +1,12 @@
 # frozen_string_literal: true

 require "sidekiq/manager"
-require "sidekiq/
+require "sidekiq/capsule"
 require "sidekiq/scheduled"
 require "sidekiq/ring_buffer"

 module Sidekiq
-  # The Launcher starts the
+  # The Launcher starts the Capsule Managers, the Poller thread and provides the process heartbeat.
   class Launcher
     include Sidekiq::Component

@@ -16,48 +16,55 @@ module Sidekiq
      proc { "sidekiq" },
      proc { Sidekiq::VERSION },
      proc { |me, data| data["tag"] },
-     proc { |me, data| "[#{Processor::WORK_STATE.size} of #{
+     proc { |me, data| "[#{Processor::WORK_STATE.size} of #{me.config.total_concurrency} busy]" },
      proc { |me, data| "stopping" if me.stopping? }
    ]

-   attr_accessor :
+   attr_accessor :managers, :poller

-   def initialize(
-     @config =
-
-     @
-
+   def initialize(config, embedded: false)
+     @config = config
+     @embedded = embedded
+     @managers = config.capsules.values.map do |cap|
+       Sidekiq::Manager.new(cap)
+     end
+     @poller = Sidekiq::Scheduled::Poller.new(@config)
      @done = false
    end

-
-
+   # Start this Sidekiq instance. If an embedding process already
+   # has a heartbeat thread, caller can use `async_beat: false`
+   # and instead have thread call Launcher#heartbeat every N seconds.
+   def run(async_beat: true)
+     Sidekiq.freeze!
+     @thread = safe_thread("heartbeat", &method(:start_heartbeat)) if async_beat
      @poller.start
-     @
+     @managers.each(&:start)
    end

    # Stops this instance from processing any more jobs,
-   #
    def quiet
+     return if @done
+
      @done = true
-     @
+     @managers.each(&:quiet)
      @poller.terminate
+     fire_event(:quiet, reverse: true)
    end

    # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
    def stop
      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]

-
-     @
-
-
-
+     quiet
+     stoppers = @managers.map do |mgr|
+       Thread.new do
+         mgr.stop(deadline)
+       end
+     end

-
-
-     strategy = @config[:fetch]
-     strategy.bulk_requeue([], @config)
+     fire_event(:shutdown, reverse: true)
+     stoppers.each(&:join)

      clear_heartbeat
    end
@@ -66,18 +73,30 @@ module Sidekiq
      @done
    end

+   # If embedding Sidekiq, you can have the process heartbeat
+   # call this method to regularly heartbeat rather than creating
+   # a separate thread.
+   def heartbeat
+     ❤
+   end
+
    private unless $TESTING

-   BEAT_PAUSE =
+   BEAT_PAUSE = 10

    def start_heartbeat
      loop do
-
+       beat
        sleep BEAT_PAUSE
      end
      logger.info("Heartbeat stopping...")
    end

+   def beat
+     $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ") unless @embedded
+     ❤
+   end
+
    def clear_heartbeat
      flush_stats

@@ -94,12 +113,6 @@ module Sidekiq
      # best effort, ignore network errors
    end

-   def heartbeat
-     $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
-
-     ❤
-   end
-
    def flush_stats
      fails = Processor::FAILURE.reset
      procd = Processor::PROCESSED.reset
@@ -107,7 +120,7 @@ module Sidekiq

      nowdate = Time.now.utc.strftime("%Y-%m-%d")
      begin
-
+       redis do |conn|
          conn.pipelined do |pipeline|
            pipeline.incrby("stat:processed", procd)
            pipeline.incrby("stat:processed:#{nowdate}", procd)
@@ -119,9 +132,7 @@ module Sidekiq
          end
        end
      rescue => ex
-
-       # try to handle the exception
-       Sidekiq.logger.warn("Unable to flush stats: #{ex}")
+       logger.warn("Unable to flush stats: #{ex}")
      end
    end

@@ -130,23 +141,10 @@ module Sidekiq
      fails = procd = 0

      begin
-
-       procd = Processor::PROCESSED.reset
-       curstate = Processor::WORK_STATE.dup
-
-       nowdate = Time.now.utc.strftime("%Y-%m-%d")
+       flush_stats

+       curstate = Processor::WORK_STATE.dup
        redis do |conn|
-         conn.multi do |transaction|
-           transaction.incrby("stat:processed", procd)
-           transaction.incrby("stat:processed:#{nowdate}", procd)
-           transaction.expire("stat:processed:#{nowdate}", STATS_TTL)
-
-           transaction.incrby("stat:failed", fails)
-           transaction.incrby("stat:failed:#{nowdate}", fails)
-           transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
-         end
-
          # work is the current set of executing jobs
          work_key = "#{key}:work"
          conn.pipelined do |transaction|
@@ -163,10 +161,10 @@ module Sidekiq
      fails = procd = 0
      kb = memory_usage(::Process.pid)

-     _, exists, _, _,
+     _, exists, _, _, signal = redis { |conn|
        conn.multi { |transaction|
          transaction.sadd("processes", [key])
-         transaction.exists
+         transaction.exists(key)
          transaction.hmset(key, "info", to_json,
            "busy", curstate.size,
            "beat", Time.now.to_f,
@@ -179,12 +177,10 @@ module Sidekiq
      }

      # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-     fire_event(:heartbeat) unless exists
+     fire_event(:heartbeat) unless exists > 0
      fire_event(:beat, oneshot: false)

-
-
-     ::Process.kill(msg, ::Process.pid)
+     ::Process.kill(signal, ::Process.pid) if signal && !@embedded
    rescue => e
      # ignore all redis/network issues
      logger.error("heartbeat: #{e}")
@@ -218,7 +214,7 @@ module Sidekiq
        Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
        Ensure Redis is running in the same AZ or datacenter as Sidekiq.
        If these values are close to 100,000, that means your Sidekiq process may be
-       CPU-saturated; reduce your concurrency and/or see https://github.com/
+       CPU-saturated; reduce your concurrency and/or see https://github.com/sidekiq/sidekiq/discussions/5039
      EOM
      RTT_READINGS.reset
    end
@@ -251,13 +247,20 @@ module Sidekiq
        "started_at" => Time.now.to_f,
        "pid" => ::Process.pid,
        "tag" => @config[:tag] || "",
-       "concurrency" => @config
-       "queues" => @config
-       "
-       "
+       "concurrency" => @config.total_concurrency,
+       "queues" => @config.capsules.values.flat_map { |cap| cap.queues }.uniq,
+       "weights" => to_weights,
+       "labels" => @config[:labels].to_a,
+       "identity" => identity,
+       "version" => Sidekiq::VERSION,
+       "embedded" => @embedded
      }
    end

+   def to_weights
+     @config.capsules.values.map(&:weights)
+   end
+
    def to_json
      # this data changes infrequently so dump it to a string
      # now so we don't need to dump it every heartbeat.
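
The run(async_beat:) / heartbeat split above exists for embedded mode: a host process that already has a timer can drive the heartbeat itself instead of letting the Launcher spawn a thread. A rough sketch, assuming `config` is an already-built Sidekiq::Config (most applications would go through the higher-level helper in the new embedded.rb rather than constructing a Launcher by hand):

    require "sidekiq/launcher"

    launcher = Sidekiq::Launcher.new(config, embedded: true)
    launcher.run(async_beat: false)   # skip the internal heartbeat thread

    # host-owned timer: report liveness roughly every BEAT_PAUSE (10s)
    Thread.new do
      until launcher.stopping?
        launcher.heartbeat
        sleep 10
      end
    end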
data/lib/sidekiq/logger.rb
CHANGED
@@ -31,7 +31,7 @@ module Sidekiq
      "fatal" => 4
    }
    LEVELS.default_proc = proc do |_, level|
-
+     puts("Invalid log level: #{level.inspect}")
      nil
    end

@@ -70,36 +70,11 @@ module Sidekiq
    ensure
      self.local_level = old_local_level
    end
-
-   # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
-   # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
-   def add(severity, message = nil, progname = nil, &block)
-     severity ||= ::Logger::UNKNOWN
-     progname ||= @progname
-
-     return true if @logdev.nil? || severity < level
-
-     if message.nil?
-       if block
-         message = yield
-       else
-         message = progname
-         progname = @progname
-       end
-     end
-
-     @logdev.write format_message(format_severity(severity), Time.now, progname, message)
-   end
  end

  class Logger < ::Logger
    include LoggingUtils

-   def initialize(*args, **kwargs)
-     super
-     self.formatter = Sidekiq.log_formatter
-   end
-
    module Formatters
      class Base < ::Logger::Formatter
        def tid
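
With the custom #add override and the formatter assignment gone, Sidekiq::Logger now behaves like a plain ::Logger and customization happens on the config object. A small sketch of the 7.x style (the formatter class comes from lib/sidekiq/logger.rb; the level choice is just an example):

    Sidekiq.configure_server do |config|
      config.logger.level = Logger::WARN
      config.logger.formatter = Sidekiq::Logger::Formatters::JSON.new
    end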
data/lib/sidekiq/manager.rb
CHANGED
@@ -1,7 +1,6 @@
 # frozen_string_literal: true

 require "sidekiq/processor"
-require "sidekiq/fetch"
 require "set"

 module Sidekiq
@@ -23,19 +22,19 @@ module Sidekiq
    include Sidekiq::Component

    attr_reader :workers
+   attr_reader :capsule

-   def initialize(
-     @config =
-
-     @count = options[:concurrency] || 10
+   def initialize(capsule)
+     @config = @capsule = capsule
+     @count = capsule.concurrency
      raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1

      @done = false
      @workers = Set.new
+     @plock = Mutex.new
      @count.times do
        @workers << Processor.new(@config, &method(:processor_result))
      end
-     @plock = Mutex.new
    end

    def start
@@ -46,14 +45,12 @@ module Sidekiq
      return if @done
      @done = true

-     logger.info { "Terminating quiet threads" }
+     logger.info { "Terminating quiet threads for #{capsule.name} capsule" }
      @workers.each(&:terminate)
-     fire_event(:quiet, reverse: true)
    end

    def stop(deadline)
      quiet
-     fire_event(:shutdown, reverse: true)

      # some of the shutdown events can be async,
      # we don't have any way to know when they're done but
@@ -66,6 +63,8 @@ module Sidekiq
      return if @workers.empty?

      hard_shutdown
+   ensure
+     capsule.stop
    end

    def processor_result(processor, reason = nil)
@@ -105,8 +104,7 @@ module Sidekiq
      # contract says that jobs are run AT LEAST once. Process termination
      # is delayed until we're certain the jobs are back in Redis because
      # it is worse to lose a job than to run it twice.
-
-     strategy.bulk_requeue(jobs, @config)
+     capsule.fetcher.bulk_requeue(jobs)
    end

    cleanup.each do |processor|
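
Manager now wraps a single Capsule rather than an options hash, so concurrency, queues and the fetcher all come from the capsule. A hedged sketch of the new wiring (normally the Launcher builds Managers for you; Config.new taking a hash and #default_capsule are assumptions based on the 7.x config API):

    config  = Sidekiq::Config.new(concurrency: 2, queues: %w[default])
    capsule = config.default_capsule        # carries concurrency, queues, fetcher, middleware
    manager = Sidekiq::Manager.new(capsule) # one Manager per capsule in 7.x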
data/lib/sidekiq/metrics/query.rb
CHANGED
@@ -13,9 +13,9 @@ module Sidekiq
    # NB: all metrics and times/dates are UTC only. We specifically do not
    # support timezones.
    class Query
-     def initialize(pool:
+     def initialize(pool: nil, now: Time.now)
        @time = now.utc
-       @pool = pool
+       @pool = pool || Sidekiq.default_configuration.redis_pool
        @klass = nil
      end

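
Because Query now defaults its pool to Sidekiq.default_configuration.redis_pool, callers outside the Web UI can instantiate it without arguments. A hedged sketch; top_jobs and for_job are the readers the metrics Web UI uses, so treat the exact result shape as an implementation detail:

    require "sidekiq/metrics/query"

    q = Sidekiq::Metrics::Query.new    # uses Sidekiq.default_configuration.redis_pool
    overview = q.top_jobs              # recent per-class execution totals
    detail   = q.for_job("HardJob")    # minute buckets for one class (name is an example)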
data/lib/sidekiq/metrics/shared.rb
CHANGED
@@ -2,7 +2,8 @@ require "concurrent"

 module Sidekiq
   module Metrics
-    #
+    # This is the only dependency on concurrent-ruby in Sidekiq but it's
+    # mandatory for thread-safety until MRI supports atomic operations on values.
     Counter = ::Concurrent::AtomicFixnum

     # Implements space-efficient but statistically useful histogram storage.
@@ -38,7 +39,6 @@ module Sidekiq
        "65s", "100s", "150s", "225s", "335s",
        "Slow"
      ]
-
      FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \
        GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \
        GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \
@@ -46,6 +46,7 @@ module Sidekiq
        GET u16 #16 GET u16 #17 GET u16 #18 GET u16 #19 \
        GET u16 #20 GET u16 #21 GET u16 #22 GET u16 #23 \
        GET u16 #24 GET u16 #25".split
+     HISTOGRAM_TTL = 8 * 60 * 60

      def each
        buckets.each { |counter| yield counter.value }
@@ -86,7 +87,7 @@ module Sidekiq
          end

          conn.bitfield(*cmd) if cmd.size > 3
-         conn.expire(key,
+         conn.expire(key, HISTOGRAM_TTL)
          key
        end
      end
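
Counter is simply an alias for Concurrent::AtomicFixnum, which is why concurrent-ruby remains a dependency: many processor threads increment the same counters without locking. A tiny illustration:

    require "sidekiq/metrics/shared"

    counter = Sidekiq::Metrics::Counter.new  # Concurrent::AtomicFixnum, starts at 0
    10.times { counter.increment }           # safe from any number of threads
    counter.value                            # => 10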
data/lib/sidekiq/metrics/tracking.rb
CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 require "time"
 require "sidekiq"
 require "sidekiq/metrics/shared"
@@ -48,8 +50,8 @@ module Sidekiq
        end
      end

-     LONG_TERM = 90 * 24 * 60 * 60
-     MID_TERM = 7 * 24 * 60 * 60
+     # LONG_TERM = 90 * 24 * 60 * 60
+     # MID_TERM = 7 * 24 * 60 * 60
      SHORT_TERM = 8 * 60 * 60

      def flush(time = Time.now)
@@ -59,12 +61,13 @@ module Sidekiq
        return if procd == 0 && fails == 0

        now = time.utc
-       nowdate = now.strftime("%Y%m%d")
-       nowhour = now.strftime("%Y%m%d|%-H")
+       # nowdate = now.strftime("%Y%m%d")
+       # nowhour = now.strftime("%Y%m%d|%-H")
        nowmin = now.strftime("%Y%m%d|%-H:%-M")
        count = 0

        redis do |conn|
+         # persist fine-grained histogram data
          if grams.size > 0
            conn.pipelined do |pipe|
              grams.each do |_, gram|
@@ -73,15 +76,16 @@ module Sidekiq
              end
            end

+         # persist coarse grained execution count + execution millis.
+         # note as of today we don't use or do anything with the
+         # daily or hourly rollups.
          [
-           ["j", jobs, nowdate, LONG_TERM],
-           ["j", jobs, nowhour, MID_TERM],
+           # ["j", jobs, nowdate, LONG_TERM],
+           # ["j", jobs, nowhour, MID_TERM],
            ["j", jobs, nowmin, SHORT_TERM]
          ].each do |prefix, data, bucket, ttl|
-           # Quietly seed the new 7.0 stats format so migration is painless.
            conn.pipelined do |xa|
              stats = "#{prefix}|#{bucket}"
-             # logger.debug "Flushing metrics #{stats}"
              data.each_pair do |key, value|
                xa.hincrby stats, key, value
                count += 1
@@ -89,7 +93,7 @@ module Sidekiq
              xa.expire(stats, ttl)
            end
          end
-         logger.
+         logger.debug "Flushed #{count} metrics"
          count
        end
      end
@@ -121,14 +125,12 @@ module Sidekiq
    end
  end

-
- Sidekiq.
-
-
-
-
-
-   exec.flush
- end
+Sidekiq.configure_server do |config|
+  exec = Sidekiq::Metrics::ExecutionTracker.new(config)
+  config.server_middleware do |chain|
+    chain.add Sidekiq::Metrics::Middleware, exec
+  end
+  config.on(:beat) do
+    exec.flush
   end
 end
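
The tracker is now wired up inside Sidekiq.configure_server: a server middleware records each execution and the :beat lifecycle event flushes the buffered counts roughly every 10 seconds. The same config.on hook is available to applications; a sketch (the callback body is just an example of something cheap):

    Sidekiq.configure_server do |config|
      config.on(:beat) do
        # runs from the heartbeat, about every BEAT_PAUSE (10s); keep it cheap
        config.logger.debug { "heartbeat tick" }
      end
    end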
data/lib/sidekiq/middleware/chain.rb
CHANGED
@@ -80,15 +80,6 @@
  class Chain
    include Enumerable

-   # A unique instance of the middleware chain is created for
-   # each job executed in order to be thread-safe.
-   # @param copy [Sidekiq::Middleware::Chain] New instance of Chain
-   # @returns nil
-   def initialize_copy(copy)
-     copy.instance_variable_set(:@entries, entries.dup)
-     nil
-   end
-
    # Iterate through each middleware in the chain
    def each(&block)
      entries.each(&block)
@@ -105,6 +96,12 @@ module Sidekiq
      @entries ||= []
    end

+   def copy_for(capsule)
+     chain = Sidekiq::Middleware::Chain.new(capsule)
+     chain.instance_variable_set(:@entries, entries.dup)
+     chain
+   end
+
    # Remove all middleware matching the given Class
    # @param klass [Class]
    def remove(klass)
@@ -152,6 +149,7 @@ module Sidekiq
    def exists?(klass)
      any? { |entry| entry.klass == klass }
    end
+   alias_method :include?, :exists?

    # @return [Boolean] if the chain contains no middleware
    def empty?
@@ -168,23 +166,26 @@ module Sidekiq

    # Used by Sidekiq to execute the middleware at runtime
    # @api private
-   def invoke(*args)
+   def invoke(*args, &block)
      return yield if empty?

      chain = retrieve
-
-
-
-
-
+     traverse(chain, 0, args, &block)
+   end
+
+   private
+
+   def traverse(chain, index, args, &block)
+     if index >= chain.size
+       yield
+     else
+       chain[index].call(*args) do
+         traverse(chain, index + 1, args, &block)
        end
      end
-     traverse_chain.call
    end
  end

-  private
-
  # Represents each link in the middleware chain
  # @api private
  class Entry
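
invoke now recurses through the private traverse helper and passes the job block explicitly, but the contract for user middleware is unchanged: implement call and yield to continue down the chain. A minimal server middleware sketch (the class name and log line are illustrative):

    class ElapsedLogger
      include Sidekiq::ServerMiddleware   # provides logger/config helpers

      def call(worker, job, queue)
        started = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
        yield                             # run the rest of the chain, then the job
      ensure
        elapsed = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - started
        logger.info { "#{job["class"]} on #{queue} took #{elapsed.round(3)}s" }
      end
    end

    Sidekiq.configure_server do |config|
      config.server_middleware do |chain|
        chain.add ElapsedLogger
      end
    end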
data/lib/sidekiq/middleware/current_attributes.rb
CHANGED
@@ -22,13 +22,11 @@ module Sidekiq
    end

    def call(_, job, _, _)
-
-
-
-
-
-     job["cattr"] = attrs
-   end
+     if !job.has_key?("cattr")
+       attrs = @strklass.constantize.attributes
+       # Retries can push the job N times, we don't
+       # want retries to reset cattr. #5692, #5090
+       job["cattr"] = attrs if attrs.any?
      end
      yield
    end
@@ -50,14 +48,9 @@ module Sidekiq
      end
    end

-   def self.persist(klass)
-
-
-   end
-   Sidekiq.configure_server do |config|
-     config.client_middleware.add Save, klass.to_s
-     config.server_middleware.add Load, klass.to_s
-   end
+   def self.persist(klass, config = Sidekiq.default_configuration)
+     config.client_middleware.add Save, klass.to_s
+     config.server_middleware.add Load, klass.to_s
    end
  end
 end
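
persist now takes the config explicitly (defaulting to Sidekiq.default_configuration), and Save only writes "cattr" once so retries cannot overwrite the attributes captured at enqueue time. Typical Rails usage per the 7.x API (Myapp::Current is a placeholder for your ActiveSupport::CurrentAttributes subclass):

    # config/initializers/sidekiq.rb
    require "sidekiq/middleware/current_attributes"

    Sidekiq::CurrentAttributes.persist("Myapp::Current")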
data/lib/sidekiq/monitor.rb
CHANGED
@@ -16,8 +16,6 @@ class Sidekiq::Monitor
      return
    end
    send(section)
- rescue => e
-   abort "Couldn't get status: #{e}"
  end

  def all
@@ -49,10 +47,25 @@
  def processes
    puts "---- Processes (#{process_set.size}) ----"
    process_set.each_with_index do |process, index|
+     # Keep compatibility with legacy versions since we don't want to break sidekiqmon during rolling upgrades or downgrades.
+     #
+     # Before:
+     #   ["default", "critical"]
+     #
+     # After:
+     #   {"default" => 1, "critical" => 10}
+     queues =
+       if process["weights"]
+         process["weights"].sort_by { |queue| queue[0] }.map { |capsule| capsule.map { |name, weight| (weight > 0) ? "#{name}: #{weight}" : name }.join(", ") }
+       else
+         process["queues"].sort
+       end
+
      puts "#{process["identity"]} #{tags_for(process)}"
      puts "  Started: #{Time.at(process["started_at"])} (#{time_ago(process["started_at"])})"
      puts "  Threads: #{process["concurrency"]} (#{process["busy"]} busy)"
-     puts "  Queues: #{split_multiline(
+     puts "  Queues: #{split_multiline(queues, pad: 11)}"
+     puts "  Version: #{process["version"] || "Unknown"}" if process["version"] != Sidekiq::VERSION
      puts "" unless (index + 1) == process_set.size
    end
  end
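
sidekiqmon prefers the new per-capsule "weights" entry and falls back to "queues" for processes still running older versions. The same data is visible through the API; a hedged sketch:

    require "sidekiq/api"

    Sidekiq::ProcessSet.new.each do |process|
      puts process["identity"]
      # "weights" is one Hash per capsule, e.g. {"default" => 1}; nil for pre-7.0 processes
      puts(process["weights"] ? process["weights"].inspect : process["queues"].sort.join(", "))
    end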