sidekiq 6.5.1 → 6.5.4
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of sidekiq might be problematic. Click here for more details.
- checksums.yaml +4 -4
- data/Changes.md +22 -0
- data/lib/sidekiq/api.rb +126 -22
- data/lib/sidekiq/cli.rb +1 -0
- data/lib/sidekiq/component.rb +2 -1
- data/lib/sidekiq/job_retry.rb +54 -35
- data/lib/sidekiq/launcher.rb +4 -2
- data/lib/sidekiq/metrics/deploy.rb +47 -0
- data/lib/sidekiq/metrics/query.rb +124 -0
- data/lib/sidekiq/metrics/shared.rb +94 -0
- data/lib/sidekiq/metrics/tracking.rb +134 -0
- data/lib/sidekiq/middleware/chain.rb +70 -35
- data/lib/sidekiq/middleware/current_attributes.rb +6 -4
- data/lib/sidekiq/processor.rb +9 -3
- data/lib/sidekiq/scheduled.rb +1 -7
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/application.rb +13 -0
- data/lib/sidekiq/web/helpers.rb +24 -1
- data/lib/sidekiq/web.rb +4 -0
- data/lib/sidekiq.rb +9 -1
- data/sidekiq.gemspec +1 -1
- data/web/assets/javascripts/application.js +1 -1
- data/web/assets/javascripts/dashboard.js +0 -17
- data/web/assets/javascripts/graph.js +16 -0
- data/web/locales/en.yml +4 -0
- data/web/views/_nav.erb +1 -1
- data/web/views/busy.erb +1 -1
- data/web/views/dashboard.erb +1 -0
- data/web/views/metrics.erb +59 -0
- data/web/views/metrics_for_job.erb +92 -0
- data/web/views/queue.erb +5 -1
- metadata +11 -5
- data/lib/sidekiq/.DS_Store +0 -0
@@ -0,0 +1,47 @@
|
|
1
|
+
require "sidekiq"
|
2
|
+
require "date"
|
3
|
+
|
4
|
+
# This file is designed to be required within the user's
|
5
|
+
# deployment script; it should need a bare minimum of dependencies.
|
6
|
+
#
|
7
|
+
# require "sidekiq/metrics/deploy"
|
8
|
+
# gitdesc = `git log -1 --format="%h %s"`.strip
|
9
|
+
# d = Sidekiq::Metrics::Deploy.new
|
10
|
+
# d.mark(label: gitdesc)
|
11
|
+
#
|
12
|
+
# Note that you cannot mark more than once per minute. This is a feature, not a bug.
|
13
|
+
module Sidekiq
|
14
|
+
module Metrics
|
15
|
+
class Deploy
|
16
|
+
MARK_TTL = 90 * 24 * 60 * 60 # 90 days
|
17
|
+
|
18
|
+
def initialize(pool = Sidekiq.redis_pool)
|
19
|
+
@pool = pool
|
20
|
+
end
|
21
|
+
|
22
|
+
def mark(at: Time.now, label: "")
|
23
|
+
# we need to round the timestamp so that we gracefully
|
24
|
+
# handle an excepted common error in marking deploys:
|
25
|
+
# having every process mark its deploy, leading
|
26
|
+
# to N marks for each deploy. Instead we round the time
|
27
|
+
# to the minute so that multple marks within that minute
|
28
|
+
# will all naturally rollup into one mark per minute.
|
29
|
+
whence = at.utc
|
30
|
+
floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
|
31
|
+
datecode = floor.strftime("%Y%m%d")
|
32
|
+
key = "#{datecode}-marks"
|
33
|
+
@pool.with do |c|
|
34
|
+
c.pipelined do |pipe|
|
35
|
+
pipe.hsetnx(key, floor.rfc3339, label)
|
36
|
+
pipe.expire(key, MARK_TTL)
|
37
|
+
end
|
38
|
+
end
|
39
|
+
end
|
40
|
+
|
41
|
+
def fetch(date = Time.now.utc.to_date)
|
42
|
+
datecode = date.strftime("%Y%m%d")
|
43
|
+
@pool.with { |c| c.hgetall("#{datecode}-marks") }
|
44
|
+
end
|
45
|
+
end
|
46
|
+
end
|
47
|
+
end
|
@@ -0,0 +1,124 @@
|
|
1
|
+
require "sidekiq"
|
2
|
+
require "date"
|
3
|
+
require "set"
|
4
|
+
|
5
|
+
require "sidekiq/metrics/shared"
|
6
|
+
|
7
|
+
module Sidekiq
  module Metrics
    # Allows caller to query for Sidekiq execution metrics within Redis.
    # Caller sets a set of attributes to act as filters. {#fetch} will call
    # Redis and return a Hash of results.
    #
    # NB: all metrics and times/dates are UTC only. We specifically do not
    # support timezones.
    class Query
      # :hour, :day, :month
      attr_accessor :period

      # a specific job class, e.g. "App::OrderJob"
      attr_accessor :klass

      # the date specific to the period
      # for :day or :hour, something like Date.today or Date.new(2022, 7, 13)
      # for :month, Date.new(2022, 7, 1)
      attr_accessor :date

      # for period = :hour, the specific hour, integer e.g. 1 or 18
      # note that hours and minutes do not have a leading zero so minute-specific
      # keys will look like "j|20220718|7:3" for data at 07:03.
      attr_accessor :hour

      def initialize(pool: Sidekiq.redis_pool, now: Time.now)
        @time = now.utc
        @pool = pool
        @klass = nil
      end

      # Pull the per-minute metric hashes for the trailing hour and roll
      # them up into totals plus a per-metric "top classes" leaderboard.
      def top_jobs
        summary = {
          date: @time.to_date,
          period: :hour,
          ends_at: @time,
          size: 60,
          starts_at: @time - 60 * 60
        }

        # one HGETALL per minute, newest first, issued in a single pipeline
        per_minute = @pool.with do |conn|
          conn.pipelined do |pipe|
            cursor = @time
            60.times do
              pipe.hgetall "j|#{cursor.strftime("%Y%m%d")}|#{cursor.hour}:#{cursor.min}"
              cursor -= 60
            end
          end
        end

        # merge the per-minute data into a totals hash for the hour
        totals = Hash.new(0)
        classes = Set.new
        per_minute.each do |minute_hash|
          minute_hash.each_pair do |field, count|
            totals[field] += count.to_i
            classes << field.split("|")[0]
          end
        end

        # drop junk entries whose "class name" is implausibly short
        summary[:job_classes] = classes.delete_if { |name| name.size < 3 }
        summary[:totals] = totals

        # pivot "Klass|metric" totals into { metric => { Klass => count } }
        leaderboard = {}
        totals.each_pair do |field, count|
          name, metric = field.split("|")
          (leaderboard[metric] ||= Hash.new(0))[name] = count
        end
        summary[:top_classes] = leaderboard.transform_values do |by_class|
          by_class.sort_by { |_, count| count }.reverse.to_h
        end
        summary
      end

      # Detailed per-minute stats (runtime, processed, failed, histogram)
      # for one job class over the trailing hour, with deploy marks attached.
      def for_job(klass)
        summary = {
          date: @time.to_date,
          period: :hour,
          ends_at: @time,
          size: 60
        }
        marks = @pool.with { |conn| conn.hgetall("#{@time.strftime("%Y%m%d")}-marks") }

        # one HMGET per minute, newest first, fetching ms/processed/failed
        raw = @pool.with do |conn|
          conn.pipelined do |pipe|
            cursor = @time
            60.times do
              pipe.hmget "j|#{cursor.strftime("%Y%m%d|%-H:%-M")}", "#{klass}|ms", "#{klass}|p", "#{klass}|f"
              cursor -= 60
            end
          end
        end

        hist = Histogram.new(klass)
        cursor = @time
        rows = @pool.with do |conn|
          raw.map do |(ms, p, f)|
            minute = Time.utc(cursor.year, cursor.month, cursor.mday, cursor.hour, cursor.min, 0)
            row = {
              time: minute.iso8601,
              epoch: minute.to_i,
              ms: ms.to_i, p: p.to_i, f: f.to_i, hist: hist.fetch(conn, cursor)
            }
            # annotate the row with a deploy mark when one landed in this minute
            row[:mark] = marks[row[:time]] if marks[row[:time]]
            cursor -= 60
            row
          end
        end

        summary[:marks] = marks
        summary[:starts_at] = cursor
        summary[:data] = rows
        summary
      end
    end
  end
end
|
@@ -0,0 +1,94 @@
|
|
1
|
+
require "concurrent"
|
2
|
+
|
3
|
+
module Sidekiq
  module Metrics
    # TODO Support apps without concurrent-ruby
    Counter = ::Concurrent::AtomicFixnum

    # Implements space-efficient but statistically useful histogram storage.
    # A precise time histogram stores every time. Instead we break times into a set of
    # known buckets and increment counts of the associated time bucket. Even if we call
    # the histogram a million times, we'll still only store 26 buckets.
    # NB: needs to be thread-safe or resilient to races.
    #
    # To store this data, we use Redis' BITFIELD command to store unsigned 16-bit counters
    # per bucket per klass per minute. It's unlikely that most people will be executing more
    # than 1000 job/sec for a full minute of a specific type.
    class Histogram
      include Enumerable

      # This number represents the maximum milliseconds for this bucket.
      # 20 means all job executions up to 20ms, e.g. if a job takes
      # 280ms, it'll increment bucket[7]. Note we can track job executions
      # up to about 5.5 minutes. After that, it's assumed you're probably
      # not too concerned with its performance.
      BUCKET_INTERVALS = [
        20, 30, 45, 65, 100,
        150, 225, 335, 500, 750,
        1100, 1700, 2500, 3800, 5750,
        8500, 13000, 20000, 30000, 45000,
        65000, 100000, 150000, 225000, 335000,
        Float::INFINITY # the "maybe your job is too long" bucket
      ]
      LABELS = [
        "20ms", "30ms", "45ms", "65ms", "100ms",
        "150ms", "225ms", "335ms", "500ms", "750ms",
        "1.1s", "1.7s", "2.5s", "3.8s", "5.75s",
        "8.5s", "13s", "20s", "30s", "45s",
        "65s", "100s", "150s", "225s", "335s",
        "Slow"
      ]

      # BITFIELD subcommand list reading all 26 u16 bucket counters in one call.
      FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \
        GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \
        GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \
        GET u16 #12 GET u16 #13 GET u16 #14 GET u16 #15 \
        GET u16 #16 GET u16 #17 GET u16 #18 GET u16 #19 \
        GET u16 #20 GET u16 #21 GET u16 #22 GET u16 #23 \
        GET u16 #24 GET u16 #25".split

      # Yields the current in-memory count of each bucket, in LABELS order.
      def each
        buckets.each { |counter| yield counter.value }
      end

      # Human-readable label for bucket +idx+, e.g. "20ms" or "Slow".
      def label(idx)
        LABELS[idx]
      end

      attr_reader :buckets
      def initialize(klass)
        @klass = klass
        @buckets = Array.new(BUCKET_INTERVALS.size) { Counter.new }
      end

      # Record one job execution which took +ms+ milliseconds by incrementing
      # the first bucket whose upper bound exceeds it. The trailing Infinity
      # interval guarantees a match, so #index never returns nil.
      def record_time(ms)
        @buckets[BUCKET_INTERVALS.index { |upper| ms < upper }].increment
      end

      # Read the persisted counters for this klass at the minute containing
      # +now+. Returns the raw BITFIELD reply: one Integer per bucket.
      def fetch(conn, now = Time.now)
        window = now.utc.strftime("%d-%H:%-M")
        key = "#{@klass}-#{window}"
        conn.bitfield(key, *FETCH)
      end

      # Flush the in-memory counts into the per-minute Redis bitfield and
      # return the key written. OVERFLOW SAT pins counters at 65535 instead
      # of wrapping. The key expires after a day.
      def persist(conn, now = Time.now)
        # Swap in a fresh set of counters so this instance can keep recording.
        # (Resetting to a bare [] here would make any subsequent record_time
        # call crash with NoMethodError on a nil bucket.)
        buckets, @buckets = @buckets, Array.new(BUCKET_INTERVALS.size) { Counter.new }
        window = now.utc.strftime("%d-%H:%-M")
        key = "#{@klass}-#{window}"
        cmd = [key, "OVERFLOW", "SAT"]
        buckets.each_with_index do |counter, idx|
          val = counter.value
          # only send INCRBY for buckets that actually saw executions
          cmd << "INCRBY" << "u16" << "##{idx}" << val.to_s if val > 0
        end

        conn.bitfield(*cmd) if cmd.size > 3
        conn.expire(key, 86400)
        key
      end
    end
  end
end
|
@@ -0,0 +1,134 @@
|
|
1
|
+
require "time"
|
2
|
+
require "sidekiq"
|
3
|
+
require "sidekiq/metrics/shared"
|
4
|
+
|
5
|
+
# This file contains the components which track execution metrics within Sidekiq.
module Sidekiq
  module Metrics
    # Accumulates per-class processed/failed counts, total execution time
    # and a latency Histogram in memory, protected by a Mutex. The data is
    # pushed to Redis by #flush, wired to the :beat event at the bottom of
    # this file.
    class ExecutionTracker
      include Sidekiq::Component

      def initialize(config)
        @config = config
        # "Klass|ms" / "Klass|p" / "Klass|f" => Integer
        @jobs = Hash.new(0)
        # "ms" / "p" / "f" => Integer, across all classes
        @totals = Hash.new(0)
        # job class name => Histogram, created lazily on first record
        @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
        @lock = Mutex.new
      end

      # Wrap one job execution (the given block), timing it and recording
      # success/failure counts for +klass+. Re-raises anything the block raises.
      def track(queue, klass)
        start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
        time_ms = 0
        begin
          begin
            yield
          ensure
            # capture duration even if the job raised; only recorded on success below
            finish = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
            time_ms = finish - start
          end
          # We don't track time for failed jobs as they can have very unpredictable
          # execution times. more important to know average time for successful jobs so we
          # can better recognize when a perf regression is introduced.
          @lock.synchronize {
            @grams[klass].record_time(time_ms)
            @jobs["#{klass}|ms"] += time_ms
            @totals["ms"] += time_ms
          }
        rescue Exception
          # deliberately rescues Exception (not just StandardError) so every
          # kind of raise is counted as a failure before being re-raised
          @lock.synchronize {
            @jobs["#{klass}|f"] += 1
            @totals["f"] += 1
          }
          raise
        ensure
          # "processed" counts every attempt, successful or not
          @lock.synchronize {
            @jobs["#{klass}|p"] += 1
            @totals["p"] += 1
          }
        end
      end

      # Retention for the three rollup granularities: daily, hourly, per-minute.
      LONG_TERM = 90 * 24 * 60 * 60
      MID_TERM = 7 * 24 * 60 * 60
      SHORT_TERM = 8 * 60 * 60

      # Atomically swap out the in-memory counters and write them to Redis
      # at daily/hourly/minute granularity. Returns the number of HINCRBY
      # field updates issued, or nil if nothing was recorded since last flush.
      def flush(time = Time.now)
        totals, jobs, grams = reset
        procd = totals["p"]
        fails = totals["f"]
        return if procd == 0 && fails == 0

        now = time.utc
        nowdate = now.strftime("%Y%m%d")
        # %-H / %-M: no leading zero, matching the key format Query reads
        nowhour = now.strftime("%Y%m%d|%-H")
        nowmin = now.strftime("%Y%m%d|%-H:%-M")
        count = 0

        # NOTE(review): `redis`/`logger` presumably come from Sidekiq::Component
        redis do |conn|
          if grams.size > 0
            conn.pipelined do |pipe|
              grams.each do |_, gram|
                gram.persist(pipe, now)
              end
            end
          end

          [
            ["j", jobs, nowdate, LONG_TERM],
            ["j", jobs, nowhour, MID_TERM],
            ["j", jobs, nowmin, SHORT_TERM]
          ].each do |prefix, data, bucket, ttl|
            # Quietly seed the new 7.0 stats format so migration is painless.
            conn.pipelined do |xa|
              stats = "#{prefix}|#{bucket}"
              # logger.debug "Flushing metrics #{stats}"
              data.each_pair do |key, value|
                xa.hincrby stats, key, value
                count += 1
              end
              xa.expire(stats, ttl)
            end
          end
          logger.info "Flushed #{count} metrics"
          count
        end
      end

      private

      # Swap the three accumulators for fresh ones under the lock and return
      # the old [totals, jobs, grams] so flush can write them without racing
      # concurrent track calls.
      def reset
        @lock.synchronize {
          array = [@totals, @jobs, @grams]
          @totals = Hash.new(0)
          @jobs = Hash.new(0)
          @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
          array
        }
      end
    end

    # Server middleware delegating each job execution to the shared
    # ExecutionTracker instance passed in at registration time.
    class Middleware
      include Sidekiq::ServerMiddleware

      def initialize(options)
        @exec = options
      end

      def call(_instance, hash, queue, &block)
        # "wrapped" holds the real class name for ActiveJob-wrapped jobs
        @exec.track(queue, hash["wrapped"] || hash["class"], &block)
      end
    end
  end
end

# Opt-in beta: install the tracking middleware and flush on every heartbeat.
if ENV["SIDEKIQ_METRICS_BETA"] == "1"
  Sidekiq.configure_server do |config|
    exec = Sidekiq::Metrics::ExecutionTracker.new(config)
    config.server_middleware do |chain|
      chain.add Sidekiq::Metrics::Middleware, exec
    end
    config.on(:beat) do
      exec.flush
    end
  end
end
|
@@ -4,84 +4,98 @@ require "sidekiq/middleware/modules"
|
|
4
4
|
|
5
5
|
module Sidekiq
|
6
6
|
# Middleware is code configured to run before/after
|
7
|
-
# a
|
7
|
+
# a job is processed. It is patterned after Rack
|
8
8
|
# middleware. Middleware exists for the client side
|
9
9
|
# (pushing jobs onto the queue) as well as the server
|
10
10
|
# side (when jobs are actually processed).
|
11
11
|
#
|
12
|
+
# Callers will register middleware Classes and Sidekiq will
|
13
|
+
# create new instances of the middleware for every job. This
|
14
|
+
# is important so that instance state is not shared accidentally
|
15
|
+
# between job executions.
|
16
|
+
#
|
12
17
|
# To add middleware for the client:
|
13
18
|
#
|
14
|
-
#
|
15
|
-
#
|
16
|
-
#
|
19
|
+
# Sidekiq.configure_client do |config|
|
20
|
+
# config.client_middleware do |chain|
|
21
|
+
# chain.add MyClientHook
|
22
|
+
# end
|
17
23
|
# end
|
18
|
-
# end
|
19
24
|
#
|
20
25
|
# To modify middleware for the server, just call
|
21
26
|
# with another block:
|
22
27
|
#
|
23
|
-
#
|
24
|
-
#
|
25
|
-
#
|
26
|
-
#
|
28
|
+
# Sidekiq.configure_server do |config|
|
29
|
+
# config.server_middleware do |chain|
|
30
|
+
# chain.add MyServerHook
|
31
|
+
# chain.remove ActiveRecord
|
32
|
+
# end
|
27
33
|
# end
|
28
|
-
# end
|
29
34
|
#
|
30
35
|
# To insert immediately preceding another entry:
|
31
36
|
#
|
32
|
-
#
|
33
|
-
#
|
34
|
-
#
|
37
|
+
# Sidekiq.configure_client do |config|
|
38
|
+
# config.client_middleware do |chain|
|
39
|
+
# chain.insert_before ActiveRecord, MyClientHook
|
40
|
+
# end
|
35
41
|
# end
|
36
|
-
# end
|
37
42
|
#
|
38
43
|
# To insert immediately after another entry:
|
39
44
|
#
|
40
|
-
#
|
41
|
-
#
|
42
|
-
#
|
45
|
+
# Sidekiq.configure_client do |config|
|
46
|
+
# config.client_middleware do |chain|
|
47
|
+
# chain.insert_after ActiveRecord, MyClientHook
|
48
|
+
# end
|
43
49
|
# end
|
44
|
-
# end
|
45
50
|
#
|
46
51
|
# This is an example of a minimal server middleware:
|
47
52
|
#
|
48
|
-
#
|
49
|
-
#
|
50
|
-
#
|
51
|
-
#
|
52
|
-
#
|
53
|
-
#
|
54
|
-
#
|
53
|
+
# class MyServerHook
|
54
|
+
# include Sidekiq::ServerMiddleware
|
55
|
+
#
|
56
|
+
# def call(job_instance, msg, queue)
|
57
|
+
# logger.info "Before job"
|
58
|
+
# redis {|conn| conn.get("foo") } # do something in Redis
|
59
|
+
# yield
|
60
|
+
# logger.info "After job"
|
61
|
+
# end
|
55
62
|
# end
|
56
|
-
# end
|
57
63
|
#
|
58
64
|
# This is an example of a minimal client middleware, note
|
59
65
|
# the method must return the result or the job will not push
|
60
66
|
# to Redis:
|
61
67
|
#
|
62
|
-
#
|
63
|
-
#
|
64
|
-
#
|
65
|
-
#
|
66
|
-
#
|
67
|
-
#
|
68
|
-
#
|
68
|
+
# class MyClientHook
|
69
|
+
# include Sidekiq::ClientMiddleware
|
70
|
+
#
|
71
|
+
# def call(job_class, msg, queue, redis_pool)
|
72
|
+
# logger.info "Before push"
|
73
|
+
# result = yield
|
74
|
+
# logger.info "After push"
|
75
|
+
# result
|
76
|
+
# end
|
69
77
|
# end
|
70
|
-
# end
|
71
78
|
#
|
72
79
|
module Middleware
|
73
80
|
class Chain
|
74
81
|
include Enumerable
|
75
82
|
|
83
|
+
# A unique instance of the middleware chain is created for
|
84
|
+
# each job executed in order to be thread-safe.
|
85
|
+
# @param copy [Sidekiq::Middleware::Chain] New instance of Chain
|
86
|
+
# @returns nil
|
76
87
|
def initialize_copy(copy)
|
77
88
|
copy.instance_variable_set(:@entries, entries.dup)
|
89
|
+
nil
|
78
90
|
end
|
79
91
|
|
92
|
+
# Iterate through each middleware in the chain
|
80
93
|
def each(&block)
|
81
94
|
entries.each(&block)
|
82
95
|
end
|
83
96
|
|
84
|
-
|
97
|
+
# @api private
|
98
|
+
def initialize(config = nil) # :nodoc:
|
85
99
|
@config = config
|
86
100
|
@entries = nil
|
87
101
|
yield self if block_given?
|
@@ -91,20 +105,33 @@ module Sidekiq
|
|
91
105
|
@entries ||= []
|
92
106
|
end
|
93
107
|
|
108
|
+
# Remove all middleware matching the given Class
|
109
|
+
# @param klass [Class]
|
94
110
|
def remove(klass)
|
95
111
|
entries.delete_if { |entry| entry.klass == klass }
|
96
112
|
end
|
97
113
|
|
114
|
+
# Add the given middleware to the end of the chain.
|
115
|
+
# Sidekiq will call `klass.new(*args)` to create a clean
|
116
|
+
# copy of your middleware for every job executed.
|
117
|
+
#
|
118
|
+
# chain.add(Statsd::Metrics, { collector: "localhost:8125" })
|
119
|
+
#
|
120
|
+
# @param klass [Class] Your middleware class
|
121
|
+
# @param *args [Array<Object>] Set of arguments to pass to every instance of your middleware
|
98
122
|
def add(klass, *args)
|
99
123
|
remove(klass)
|
100
124
|
entries << Entry.new(@config, klass, *args)
|
101
125
|
end
|
102
126
|
|
127
|
+
# Identical to {#add} except the middleware is added to the front of the chain.
|
103
128
|
def prepend(klass, *args)
|
104
129
|
remove(klass)
|
105
130
|
entries.insert(0, Entry.new(@config, klass, *args))
|
106
131
|
end
|
107
132
|
|
133
|
+
# Inserts +newklass+ before +oldklass+ in the chain.
|
134
|
+
# Useful if one middleware must run before another middleware.
|
108
135
|
def insert_before(oldklass, newklass, *args)
|
109
136
|
i = entries.index { |entry| entry.klass == newklass }
|
110
137
|
new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
|
@@ -112,6 +139,8 @@ module Sidekiq
|
|
112
139
|
entries.insert(i, new_entry)
|
113
140
|
end
|
114
141
|
|
142
|
+
# Inserts +newklass+ after +oldklass+ in the chain.
|
143
|
+
# Useful if one middleware must run after another middleware.
|
115
144
|
def insert_after(oldklass, newklass, *args)
|
116
145
|
i = entries.index { |entry| entry.klass == newklass }
|
117
146
|
new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
|
@@ -119,10 +148,12 @@ module Sidekiq
|
|
119
148
|
entries.insert(i + 1, new_entry)
|
120
149
|
end
|
121
150
|
|
151
|
+
# @return [Boolean] if the given class is already in the chain
|
122
152
|
def exists?(klass)
|
123
153
|
any? { |entry| entry.klass == klass }
|
124
154
|
end
|
125
155
|
|
156
|
+
# @return [Boolean] if the chain contains no middleware
|
126
157
|
def empty?
|
127
158
|
@entries.nil? || @entries.empty?
|
128
159
|
end
|
@@ -135,6 +166,8 @@ module Sidekiq
|
|
135
166
|
entries.clear
|
136
167
|
end
|
137
168
|
|
169
|
+
# Used by Sidekiq to execute the middleware at runtime
|
170
|
+
# @api private
|
138
171
|
def invoke(*args)
|
139
172
|
return yield if empty?
|
140
173
|
|
@@ -152,6 +185,8 @@ module Sidekiq
|
|
152
185
|
|
153
186
|
private
|
154
187
|
|
188
|
+
# Represents each link in the middleware chain
|
189
|
+
# @api private
|
155
190
|
class Entry
|
156
191
|
attr_reader :klass
|
157
192
|
|
@@ -23,10 +23,12 @@ module Sidekiq
|
|
23
23
|
|
24
24
|
def call(_, job, _, _)
|
25
25
|
attrs = @klass.attributes
|
26
|
-
if
|
27
|
-
job
|
28
|
-
|
29
|
-
|
26
|
+
if attrs.any?
|
27
|
+
if job.has_key?("cattr")
|
28
|
+
job["cattr"].merge!(attrs)
|
29
|
+
else
|
30
|
+
job["cattr"] = attrs
|
31
|
+
end
|
30
32
|
end
|
31
33
|
yield
|
32
34
|
end
|
data/lib/sidekiq/processor.rb
CHANGED
@@ -152,8 +152,14 @@ module Sidekiq
|
|
152
152
|
job_hash = Sidekiq.load_json(jobstr)
|
153
153
|
rescue => ex
|
154
154
|
handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
|
155
|
-
|
156
|
-
|
155
|
+
now = Time.now.to_f
|
156
|
+
config.redis do |conn|
|
157
|
+
conn.multi do |xa|
|
158
|
+
xa.zadd("dead", now.to_s, jobstr)
|
159
|
+
xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
|
160
|
+
xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
|
161
|
+
end
|
162
|
+
end
|
157
163
|
return uow.acknowledge
|
158
164
|
end
|
159
165
|
|
@@ -174,7 +180,7 @@ module Sidekiq
|
|
174
180
|
# signals that we created a retry successfully. We can acknowlege the job.
|
175
181
|
ack = true
|
176
182
|
e = h.cause || h
|
177
|
-
handle_exception(e, {context: "Job raised exception", job: job_hash
|
183
|
+
handle_exception(e, {context: "Job raised exception", job: job_hash})
|
178
184
|
raise e
|
179
185
|
rescue Exception => ex
|
180
186
|
# Unexpected error! This is very bad and indicates an exception that got past
|
data/lib/sidekiq/scheduled.rb
CHANGED
@@ -1,7 +1,6 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
3
|
require "sidekiq"
|
4
|
-
require "sidekiq/api"
|
5
4
|
require "sidekiq/component"
|
6
5
|
|
7
6
|
module Sidekiq
|
@@ -183,13 +182,8 @@ module Sidekiq
|
|
183
182
|
end
|
184
183
|
|
185
184
|
def process_count
|
186
|
-
|
187
|
-
# expensive at scale. Cut it down by 90% with this counter.
|
188
|
-
# NB: This method is only called by the scheduler thread so we
|
189
|
-
# don't need to worry about the thread safety of +=.
|
190
|
-
pcount = Sidekiq::ProcessSet.new(@count_calls % 10 == 0).size
|
185
|
+
pcount = Sidekiq.redis { |conn| conn.scard("processes") }
|
191
186
|
pcount = 1 if pcount == 0
|
192
|
-
@count_calls += 1
|
193
187
|
pcount
|
194
188
|
end
|
195
189
|
|
data/lib/sidekiq/version.rb
CHANGED