sidekiq 5.2.4 → 7.2.4
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/Changes.md +672 -8
- data/LICENSE.txt +9 -0
- data/README.md +48 -51
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +213 -115
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +623 -352
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +214 -229
- data/lib/sidekiq/client.rb +127 -102
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +287 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/job.rb +374 -0
- data/lib/sidekiq/job_logger.rb +33 -7
- data/lib/sidekiq/job_retry.rb +157 -108
- data/lib/sidekiq/job_util.rb +107 -0
- data/lib/sidekiq/launcher.rb +206 -106
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +43 -46
- data/lib/sidekiq/metrics/query.rb +156 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +140 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +95 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +146 -0
- data/lib/sidekiq/paginator.rb +28 -16
- data/lib/sidekiq/processor.rb +126 -117
- data/lib/sidekiq/rails.rb +52 -38
- data/lib/sidekiq/redis_client_adapter.rb +111 -0
- data/lib/sidekiq/redis_connection.rb +41 -112
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +112 -50
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +91 -90
- data/lib/sidekiq/transaction_aware_client.rb +51 -0
- data/lib/sidekiq/version.rb +3 -1
- data/lib/sidekiq/web/action.rb +20 -11
- data/lib/sidekiq/web/application.rb +202 -80
- data/lib/sidekiq/web/csrf_protection.rb +183 -0
- data/lib/sidekiq/web/helpers.rb +165 -114
- data/lib/sidekiq/web/router.rb +23 -19
- data/lib/sidekiq/web.rb +68 -107
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +92 -182
- data/sidekiq.gemspec +25 -16
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +152 -61
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +182 -0
- data/web/assets/javascripts/dashboard.js +35 -293
- data/web/assets/javascripts/metrics.js +298 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +10 -93
- data/web/assets/stylesheets/application.css +124 -522
- data/web/assets/stylesheets/bootstrap.css +1 -1
- data/web/locales/ar.yml +71 -65
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -53
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +86 -66
- data/web/locales/es.yml +70 -54
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +83 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +68 -63
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +18 -3
- data/web/views/_job_info.erb +21 -4
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +1 -1
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +79 -29
- data/web/views/dashboard.erb +48 -18
- data/web/views/dead.erb +3 -3
- data/web/views/filtering.erb +7 -0
- data/web/views/layout.erb +3 -1
- data/web/views/metrics.erb +91 -0
- data/web/views/metrics_for_job.erb +59 -0
- data/web/views/morgue.erb +14 -15
- data/web/views/queue.erb +33 -24
- data/web/views/queues.erb +19 -5
- data/web/views/retries.erb +16 -17
- data/web/views/retry.erb +3 -3
- data/web/views/scheduled.erb +17 -15
- metadata +71 -72
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -15
- data/.travis.yml +0 -17
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/Appraisals +0 -9
- data/COMM-LICENSE +0 -95
- data/Ent-Changes.md +0 -225
- data/Gemfile +0 -29
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -752
- data/Rakefile +0 -9
- data/bin/sidekiqctl +0 -237
- data/code_of_conduct.md +0 -50
- data/gemfiles/rails_4.gemfile +0 -31
- data/gemfiles/rails_5.gemfile +0 -31
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/delay.rb +0 -42
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -215
@@ -0,0 +1,156 @@
|
|
1
|
+
require "sidekiq"
|
2
|
+
require "date"
|
3
|
+
require "set"
|
4
|
+
|
5
|
+
require "sidekiq/metrics/shared"
|
6
|
+
|
7
|
+
module Sidekiq
  module Metrics
    # Allows caller to query for Sidekiq execution metrics within Redis.
    # Caller sets a set of attributes to act as filters. {#fetch} will call
    # Redis and return a Hash of results.
    #
    # NB: all metrics and times/dates are UTC only. We specifically do not
    # support timezones.
    class Query
      # @param pool [ConnectionPool] Redis pool to query; defaults to Sidekiq's configured pool
      # @param now [Time] the "current" time, injectable for testing
      def initialize(pool: nil, now: Time.now)
        @time = now.utc
        @pool = pool || Sidekiq.default_configuration.redis_pool
        @klass = nil
      end

      # Get metric data for all jobs from the last hour
      # +class_filter+: return only results for classes matching filter
      #
      # @param class_filter [Regexp, nil] restrict results to matching class names
      # @param minutes [Integer] number of one-minute buckets to walk backwards
      # @return [Result]
      def top_jobs(class_filter: nil, minutes: 60)
        result = Result.new

        time = @time
        redis_results = @pool.with do |conn|
          conn.pipelined do |pipe|
            minutes.times do
              # Minute-granularity bucket key, e.g. "j|20240101|12:30".
              # Hour/min are unpadded integers, matching the writer in tracking.rb.
              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
              pipe.hgetall key
              result.prepend_bucket time
              time -= 60
            end
          end
        end

        # Walk the same minutes again, folding each bucket's hash into per-class results.
        time = @time
        redis_results.each do |hash|
          hash.each do |k, v|
            kls, metric = k.split("|")
            next if class_filter && !class_filter.match?(kls)
            result.job_results[kls].add_metric metric, time, v.to_i
          end
          time -= 60
        end

        result.marks = fetch_marks(result.starts_at..result.ends_at)

        result
      end

      # Fetch metric data (execution count, failures, total ms, histogram)
      # for a single job class over the last +minutes+ minutes.
      #
      # @param klass [String] job class name
      # @return [Result]
      def for_job(klass, minutes: 60)
        result = Result.new

        time = @time
        redis_results = @pool.with do |conn|
          conn.pipelined do |pipe|
            minutes.times do
              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
              pipe.hmget key, "#{klass}|ms", "#{klass}|p", "#{klass}|f"
              result.prepend_bucket time
              time -= 60
            end
          end
        end

        time = @time
        @pool.with do |conn|
          redis_results.each do |(ms, p, f)|
            result.job_results[klass].add_metric "ms", time, ms.to_i if ms
            result.job_results[klass].add_metric "p", time, p.to_i if p
            result.job_results[klass].add_metric "f", time, f.to_i if f
            result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time).reverse
            time -= 60
          end
        end

        result.marks = fetch_marks(result.starts_at..result.ends_at)

        result
      end

      # Aggregate results for a queried time window.
      class Result < Struct.new(:starts_at, :ends_at, :size, :buckets, :job_results, :marks)
        def initialize
          super
          self.buckets = []
          self.marks = []
          self.job_results = Hash.new { |h, k| h[k] = JobResult.new }
        end

        # Buckets are built newest-first, so each prepended bucket becomes starts_at.
        def prepend_bucket(time)
          buckets.unshift time.strftime("%H:%M")
          self.ends_at ||= time
          self.starts_at = time
        end
      end

      # Per-class metric series, histogram data and totals.
      class JobResult < Struct.new(:series, :hist, :totals)
        def initialize
          super
          self.series = Hash.new { |h, k| h[k] = Hash.new(0) }
          self.hist = Hash.new { |h, k| h[k] = [] }
          self.totals = Hash.new(0)
        end

        def add_metric(metric, time, value)
          totals[metric] += value
          series[metric][time.strftime("%H:%M")] += value

          # Include timing measurements in seconds for convenience
          add_metric("s", time, value / 1000.0) if metric == "ms"
        end

        def add_hist(time, hist_result)
          hist[time.strftime("%H:%M")] = hist_result
        end

        # Average execution time across the window; failed jobs are excluded
        # from the denominator.
        def total_avg(metric = "ms")
          completed = totals["p"] - totals["f"]
          return 0 if completed.zero?
          totals[metric].to_f / completed
        end

        # Per-bucket average execution time; failed jobs are excluded.
        def series_avg(metric = "ms")
          series[metric].each_with_object(Hash.new(0)) do |(bucket, value), result|
            completed = series.dig("p", bucket) - series.dig("f", bucket)
            result[bucket] = (completed == 0) ? 0 : value.to_f / completed
          end
        end
      end

      # A deploy mark recorded within the queried window.
      class MarkResult < Struct.new(:time, :label)
        def bucket
          time.strftime("%H:%M")
        end
      end

      private

      # Load today's deploy marks and keep only those inside +time_range+.
      def fetch_marks(time_range)
        [].tap do |result|
          marks = @pool.with { |c| c.hgetall("#{@time.strftime("%Y%m%d")}-marks") }

          marks.each do |timestamp, label|
            time = Time.parse(timestamp)
            if time_range.cover? time
              result << MarkResult.new(time, label)
            end
          end
        end
      end
    end
  end
end
|
@@ -0,0 +1,95 @@
|
|
1
|
+
require "concurrent"
|
2
|
+
|
3
|
+
module Sidekiq
  module Metrics
    # This is the only dependency on concurrent-ruby in Sidekiq but it's
    # mandatory for thread-safety until MRI supports atomic operations on values.
    Counter = ::Concurrent::AtomicFixnum

    # Implements space-efficient but statistically useful histogram storage.
    # A precise time histogram stores every time. Instead we break times into a set of
    # known buckets and increment counts of the associated time bucket. Even if we call
    # the histogram a million times, we'll still only store 26 buckets.
    # NB: needs to be thread-safe or resilient to races.
    #
    # To store this data, we use Redis' BITFIELD command to store unsigned 16-bit counters
    # per bucket per klass per minute. It's unlikely that most people will be executing more
    # than 1000 job/sec for a full minute of a specific type.
    class Histogram
      include Enumerable

      # This number represents the maximum milliseconds for this bucket.
      # 20 means all job executions up to 20ms, e.g. if a job takes
      # 280ms, it'll increment bucket[7]. Note we can track job executions
      # up to about 5.5 minutes. After that, it's assumed you're probably
      # not too concerned with its performance.
      BUCKET_INTERVALS = [
        20, 30, 45, 65, 100,
        150, 225, 335, 500, 750,
        1100, 1700, 2500, 3800, 5750,
        8500, 13000, 20000, 30000, 45000,
        65000, 100000, 150000, 225000, 335000,
        1e20 # the "maybe your job is too long" bucket
      ].freeze
      LABELS = [
        "20ms", "30ms", "45ms", "65ms", "100ms",
        "150ms", "225ms", "335ms", "500ms", "750ms",
        "1.1s", "1.7s", "2.5s", "3.8s", "5.75s",
        "8.5s", "13s", "20s", "30s", "45s",
        "65s", "100s", "150s", "225s", "335s",
        "Slow"
      ].freeze
      # BITFIELD subcommands reading all 26 unsigned 16-bit counters.
      FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \
        GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \
        GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \
        GET u16 #12 GET u16 #13 GET u16 #14 GET u16 #15 \
        GET u16 #16 GET u16 #17 GET u16 #18 GET u16 #19 \
        GET u16 #20 GET u16 #21 GET u16 #22 GET u16 #23 \
        GET u16 #24 GET u16 #25".split
      HISTOGRAM_TTL = 8 * 60 * 60

      def each
        buckets.each { |counter| yield counter.value }
      end

      def label(idx)
        LABELS[idx]
      end

      attr_reader :buckets
      def initialize(klass)
        @klass = klass
        @buckets = Array.new(BUCKET_INTERVALS.size) { Counter.new }
      end

      # Record one execution time (milliseconds) into the first bucket
      # whose upper bound exceeds +ms+. The final 1e20 bucket guarantees
      # a match for any duration.
      def record_time(ms)
        index_to_use = BUCKET_INTERVALS.index { |max_ms| ms < max_ms }
        @buckets[index_to_use].increment
      end

      # Read this class's counters for the minute containing +now+.
      def fetch(conn, now = Time.now)
        window = now.utc.strftime("%d-%H:%-M")
        key = "#{@klass}-#{window}"
        conn.bitfield_ro(key, *FETCH)
      end

      # Flush the in-memory counters to Redis for the minute containing +now+
      # and reset them with fresh counters so this instance remains usable
      # (swapping in a bare [] would make a later record_time raise on nil).
      # Returns the Redis key written.
      def persist(conn, now = Time.now)
        buckets, @buckets = @buckets, Array.new(BUCKET_INTERVALS.size) { Counter.new }
        window = now.utc.strftime("%d-%H:%-M")
        key = "#{@klass}-#{window}"
        cmd = [key, "OVERFLOW", "SAT"]
        buckets.each_with_index do |counter, idx|
          val = counter.value
          cmd << "INCRBY" << "u16" << "##{idx}" << val.to_s if val > 0
        end

        conn.bitfield(*cmd) if cmd.size > 3
        conn.expire(key, HISTOGRAM_TTL)
        key
      end
    end
  end
end
|
@@ -0,0 +1,140 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "time"
|
4
|
+
require "sidekiq"
|
5
|
+
require "sidekiq/metrics/shared"
|
6
|
+
|
7
|
+
# This file contains the components which track execution metrics within Sidekiq.
|
8
|
+
module Sidekiq
  module Metrics
    # Accumulates per-class execution counts, failure counts, execution
    # millis and latency histograms in memory, then flushes them to Redis
    # on each heartbeat.
    class ExecutionTracker
      include Sidekiq::Component

      def initialize(config)
        @config = config
        @jobs = Hash.new(0)
        @totals = Hash.new(0)
        @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
        @lock = Mutex.new
      end

      # Wrap a job execution, recording timing plus processed/failed counts.
      # NOTE(review): +queue+ is currently unused; kept for interface stability.
      def track(queue, klass)
        start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
        time_ms = 0
        begin
          begin
            yield
          ensure
            finish = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
            time_ms = finish - start
          end
          # We don't track time for failed jobs as they can have very unpredictable
          # execution times. more important to know average time for successful jobs so we
          # can better recognize when a perf regression is introduced.
          @lock.synchronize {
            @grams[klass].record_time(time_ms)
            @jobs["#{klass}|ms"] += time_ms
            @totals["ms"] += time_ms
          }
        rescue Exception
          # Deliberately rescues Exception (not StandardError): even fatal
          # errors count as a failure before being re-raised to the caller.
          @lock.synchronize {
            @jobs["#{klass}|f"] += 1
            @totals["f"] += 1
          }
          raise
        ensure
          @lock.synchronize {
            @jobs["#{klass}|p"] += 1
            @totals["p"] += 1
          }
        end
      end

      # LONG_TERM = 90 * 24 * 60 * 60
      # MID_TERM = 7 * 24 * 60 * 60
      SHORT_TERM = 8 * 60 * 60

      # Push all buffered metrics to Redis; no-op when nothing was recorded.
      # Returns the number of hash fields written.
      def flush(time = Time.now)
        totals, jobs, grams = reset
        procd = totals["p"]
        fails = totals["f"]
        return if procd == 0 && fails == 0

        now = time.utc
        # nowdate = now.strftime("%Y%m%d")
        # nowhour = now.strftime("%Y%m%d|%-H")
        nowmin = now.strftime("%Y%m%d|%-H:%-M")
        count = 0

        redis do |conn|
          # persist fine-grained histogram data
          if grams.size > 0
            conn.pipelined do |pipe|
              grams.each do |_, gram|
                gram.persist(pipe, now)
              end
            end
          end

          # persist coarse grained execution count + execution millis.
          # note as of today we don't use or do anything with the
          # daily or hourly rollups.
          [
            # ["j", jobs, nowdate, LONG_TERM],
            # ["j", jobs, nowhour, MID_TERM],
            ["j", jobs, nowmin, SHORT_TERM]
          ].each do |prefix, data, bucket, ttl|
            conn.pipelined do |xa|
              stats = "#{prefix}|#{bucket}"
              data.each_pair do |key, value|
                xa.hincrby stats, key, value
                count += 1
              end
              xa.expire(stats, ttl)
            end
          end
          logger.debug "Flushed #{count} metrics"
          count
        end
      end

      private

      # Atomically snapshot and clear the accumulators so tracking can
      # continue while the snapshot is flushed.
      def reset
        @lock.synchronize {
          snapshot = [@totals, @jobs, @grams]
          reset_instance_variables
          snapshot
        }
      end

      def reset_instance_variables
        @totals = Hash.new(0)
        @jobs = Hash.new(0)
        @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
      end
    end

    # Server middleware routing every job execution through the shared tracker.
    class Middleware
      include Sidekiq::ServerMiddleware

      # +options+ is the shared ExecutionTracker instance.
      def initialize(options)
        @exec = options
      end

      def call(_instance, hash, queue, &block)
        @exec.track(queue, hash["wrapped"] || hash["class"], &block)
      end
    end
  end
end
|
131
|
+
|
132
|
+
Sidekiq.configure_server do |config|
  # One tracker instance is shared between the middleware (which records)
  # and the heartbeat hook (which flushes to Redis).
  exec = Sidekiq::Metrics::ExecutionTracker.new(config)
  config.server_middleware do |chain|
    chain.add Sidekiq::Metrics::Middleware, exec
  end
  config.on(:beat) do
    exec.flush
  end
end
|
@@ -1,117 +1,160 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "sidekiq/middleware/modules"
|
4
|
+
|
2
5
|
module Sidekiq
|
3
6
|
# Middleware is code configured to run before/after
|
4
|
-
# a
|
7
|
+
# a job is processed. It is patterned after Rack
|
5
8
|
# middleware. Middleware exists for the client side
|
6
9
|
# (pushing jobs onto the queue) as well as the server
|
7
10
|
# side (when jobs are actually processed).
|
8
11
|
#
|
12
|
+
# Callers will register middleware Classes and Sidekiq will
|
13
|
+
# create new instances of the middleware for every job. This
|
14
|
+
# is important so that instance state is not shared accidentally
|
15
|
+
# between job executions.
|
16
|
+
#
|
9
17
|
# To add middleware for the client:
|
10
18
|
#
|
11
|
-
#
|
12
|
-
#
|
13
|
-
#
|
19
|
+
# Sidekiq.configure_client do |config|
|
20
|
+
# config.client_middleware do |chain|
|
21
|
+
# chain.add MyClientHook
|
22
|
+
# end
|
14
23
|
# end
|
15
|
-
# end
|
16
24
|
#
|
17
25
|
# To modify middleware for the server, just call
|
18
26
|
# with another block:
|
19
27
|
#
|
20
|
-
#
|
21
|
-
#
|
22
|
-
#
|
23
|
-
#
|
28
|
+
# Sidekiq.configure_server do |config|
|
29
|
+
# config.server_middleware do |chain|
|
30
|
+
# chain.add MyServerHook
|
31
|
+
# chain.remove ActiveRecord
|
32
|
+
# end
|
24
33
|
# end
|
25
|
-
# end
|
26
34
|
#
|
27
35
|
# To insert immediately preceding another entry:
|
28
36
|
#
|
29
|
-
#
|
30
|
-
#
|
31
|
-
#
|
37
|
+
# Sidekiq.configure_client do |config|
|
38
|
+
# config.client_middleware do |chain|
|
39
|
+
# chain.insert_before ActiveRecord, MyClientHook
|
40
|
+
# end
|
32
41
|
# end
|
33
|
-
# end
|
34
42
|
#
|
35
43
|
# To insert immediately after another entry:
|
36
44
|
#
|
37
|
-
#
|
38
|
-
#
|
39
|
-
#
|
45
|
+
# Sidekiq.configure_client do |config|
|
46
|
+
# config.client_middleware do |chain|
|
47
|
+
# chain.insert_after ActiveRecord, MyClientHook
|
48
|
+
# end
|
40
49
|
# end
|
41
|
-
# end
|
42
50
|
#
|
43
51
|
# This is an example of a minimal server middleware:
|
44
52
|
#
|
45
|
-
#
|
46
|
-
#
|
47
|
-
#
|
48
|
-
#
|
49
|
-
#
|
53
|
+
# class MyServerHook
|
54
|
+
# include Sidekiq::ServerMiddleware
|
55
|
+
#
|
56
|
+
# def call(job_instance, msg, queue)
|
57
|
+
# logger.info "Before job"
|
58
|
+
# redis {|conn| conn.get("foo") } # do something in Redis
|
59
|
+
# yield
|
60
|
+
# logger.info "After job"
|
61
|
+
# end
|
50
62
|
# end
|
51
|
-
# end
|
52
63
|
#
|
53
64
|
# This is an example of a minimal client middleware, note
|
54
65
|
# the method must return the result or the job will not push
|
55
66
|
# to Redis:
|
56
67
|
#
|
57
|
-
#
|
58
|
-
#
|
59
|
-
#
|
60
|
-
#
|
61
|
-
#
|
62
|
-
#
|
68
|
+
# class MyClientHook
|
69
|
+
# include Sidekiq::ClientMiddleware
|
70
|
+
#
|
71
|
+
# def call(job_class, msg, queue, redis_pool)
|
72
|
+
# logger.info "Before push"
|
73
|
+
# result = yield
|
74
|
+
# logger.info "After push"
|
75
|
+
# result
|
76
|
+
# end
|
63
77
|
# end
|
64
|
-
# end
|
65
78
|
#
|
66
79
|
module Middleware
|
67
80
|
class Chain
|
68
81
|
include Enumerable
|
69
|
-
attr_reader :entries
|
70
|
-
|
71
|
-
def initialize_copy(copy)
|
72
|
-
copy.instance_variable_set(:@entries, entries.dup)
|
73
|
-
end
|
74
82
|
|
83
|
+
# Iterate through each middleware in the chain
def each(&block)
  entries.each(&block)
end
|
78
87
|
|
79
|
-
|
80
|
-
|
88
|
+
# @api private
def initialize(config = nil) # :nodoc:
  @config = config
  # Entries are built lazily by #entries.
  @entries = nil
  yield self if block_given?
end
|
83
94
|
|
95
|
+
# Lazily-initialized list of middleware entries.
def entries
  @entries ||= []
end
|
98
|
+
|
99
|
+
# Build an independent copy of this chain for the given capsule;
# the duplicate shares no entry list with the original.
def copy_for(capsule)
  duplicate = Sidekiq::Middleware::Chain.new(capsule)
  duplicate.instance_variable_set(:@entries, entries.dup)
  duplicate
end
|
104
|
+
|
105
|
+
# Remove all middleware matching the given Class
# @param klass [Class]
def remove(klass)
  entries.delete_if { |entry| entry.klass == klass }
end
|
87
110
|
|
111
|
+
# Add the given middleware to the end of the chain.
# Sidekiq will call `klass.new(*args)` to create a clean
# copy of your middleware for every job executed.
#
#   chain.add(Statsd::Metrics, { collector: "localhost:8125" })
#
# @param klass [Class] Your middleware class
# @param *args [Array<Object>] Set of arguments to pass to every instance of your middleware
def add(klass, *args)
  # Re-adding a class moves it to the end rather than duplicating it.
  remove(klass)
  entries << Entry.new(@config, klass, *args)
end
|
92
123
|
|
124
|
+
# Identical to {#add} except the middleware is added to the front of the chain.
def prepend(klass, *args)
  remove(klass)
  entries.insert(0, Entry.new(@config, klass, *args))
end
|
97
129
|
|
130
|
+
# Inserts +newklass+ before +oldklass+ in the chain.
# Useful if one middleware must run before another middleware.
def insert_before(oldklass, newklass, *args)
  # Reuse an existing entry for newklass if present, else build one.
  i = entries.index { |entry| entry.klass == newklass }
  new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
  # Falls back to the front of the chain when oldklass is absent.
  i = entries.index { |entry| entry.klass == oldklass } || 0
  entries.insert(i, new_entry)
end
|
104
138
|
|
139
|
+
# Inserts +newklass+ after +oldklass+ in the chain.
# Useful if one middleware must run after another middleware.
def insert_after(oldklass, newklass, *args)
  # Reuse an existing entry for newklass if present, else build one.
  i = entries.index { |entry| entry.klass == newklass }
  new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
  # Falls back to the end of the chain when oldklass is absent.
  i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1
  entries.insert(i + 1, new_entry)
end
|
111
147
|
|
148
|
+
# @return [Boolean] if the given class is already in the chain
def exists?(klass)
  any? { |entry| entry.klass == klass }
end
alias_method :include?, :exists?
|
153
|
+
|
154
|
+
# @return [Boolean] if the chain contains no middleware
def empty?
  # @entries is nil until #entries is first called, so check both.
  @entries.nil? || @entries.empty?
end
|
115
158
|
|
116
159
|
def retrieve
|
117
160
|
map(&:make_new)
|
@@ -121,29 +164,43 @@ module Sidekiq
|
|
121
164
|
entries.clear
|
122
165
|
end
|
123
166
|
|
124
|
-
|
125
|
-
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
|
130
|
-
|
167
|
+
# Used by Sidekiq to execute the middleware at runtime
# @api private
def invoke(*args, &block)
  # Fast path: run the job directly when no middleware is registered.
  return yield if empty?

  chain = retrieve
  traverse(chain, 0, args, &block)
end
|
175
|
+
|
176
|
+
private
|
177
|
+
|
178
|
+
# Recursively walk the instantiated middleware list, invoking each entry
# with the job args; the innermost call executes the job block itself.
def traverse(chain, index, args, &block)
  return yield if index >= chain.size

  chain[index].call(*args) do
    traverse(chain, index + 1, args, &block)
  end
end
|
135
187
|
end
|
136
188
|
|
189
|
+
# Represents each link in the middleware chain
# @api private
class Entry
  attr_reader :klass

  def initialize(config, klass, *args)
    @config = config
    @klass = klass
    @args = args
  end

  # Build a fresh middleware instance for one job execution,
  # injecting the Sidekiq config when the instance accepts it.
  def make_new
    instance = @klass.new(*@args)
    instance.config = @config if @config && instance.respond_to?(:config=)
    instance
  end
end
|
149
206
|
end
|