sidekiq 5.1.3 → 7.3.1
- checksums.yaml +5 -5
- data/Changes.md +756 -8
- data/LICENSE.txt +9 -0
- data/README.md +48 -51
- data/bin/multi_queue_bench +271 -0
- data/bin/sidekiq +22 -3
- data/bin/sidekiqload +213 -115
- data/bin/sidekiqmon +11 -0
- data/lib/generators/sidekiq/job_generator.rb +59 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +640 -330
- data/lib/sidekiq/capsule.rb +132 -0
- data/lib/sidekiq/cli.rb +244 -257
- data/lib/sidekiq/client.rb +132 -103
- data/lib/sidekiq/component.rb +68 -0
- data/lib/sidekiq/config.rb +293 -0
- data/lib/sidekiq/deploy.rb +64 -0
- data/lib/sidekiq/embedded.rb +63 -0
- data/lib/sidekiq/fetch.rb +49 -42
- data/lib/sidekiq/iterable_job.rb +55 -0
- data/lib/sidekiq/job/interrupt_handler.rb +24 -0
- data/lib/sidekiq/job/iterable/active_record_enumerator.rb +53 -0
- data/lib/sidekiq/job/iterable/csv_enumerator.rb +47 -0
- data/lib/sidekiq/job/iterable/enumerators.rb +135 -0
- data/lib/sidekiq/job/iterable.rb +231 -0
- data/lib/sidekiq/job.rb +385 -0
- data/lib/sidekiq/job_logger.rb +49 -12
- data/lib/sidekiq/job_retry.rb +167 -103
- data/lib/sidekiq/job_util.rb +109 -0
- data/lib/sidekiq/launcher.rb +209 -102
- data/lib/sidekiq/logger.rb +131 -0
- data/lib/sidekiq/manager.rb +43 -46
- data/lib/sidekiq/metrics/query.rb +158 -0
- data/lib/sidekiq/metrics/shared.rb +97 -0
- data/lib/sidekiq/metrics/tracking.rb +148 -0
- data/lib/sidekiq/middleware/chain.rb +113 -56
- data/lib/sidekiq/middleware/current_attributes.rb +113 -0
- data/lib/sidekiq/middleware/i18n.rb +7 -7
- data/lib/sidekiq/middleware/modules.rb +23 -0
- data/lib/sidekiq/monitor.rb +147 -0
- data/lib/sidekiq/paginator.rb +28 -16
- data/lib/sidekiq/processor.rb +175 -112
- data/lib/sidekiq/rails.rb +54 -39
- data/lib/sidekiq/redis_client_adapter.rb +114 -0
- data/lib/sidekiq/redis_connection.rb +65 -86
- data/lib/sidekiq/ring_buffer.rb +31 -0
- data/lib/sidekiq/scheduled.rb +139 -48
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +26 -0
- data/lib/sidekiq/testing/inline.rb +6 -5
- data/lib/sidekiq/testing.rb +95 -94
- data/lib/sidekiq/transaction_aware_client.rb +51 -0
- data/lib/sidekiq/version.rb +3 -1
- data/lib/sidekiq/web/action.rb +22 -12
- data/lib/sidekiq/web/application.rb +225 -76
- data/lib/sidekiq/web/csrf_protection.rb +183 -0
- data/lib/sidekiq/web/helpers.rb +215 -118
- data/lib/sidekiq/web/router.rb +23 -19
- data/lib/sidekiq/web.rb +114 -106
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +95 -182
- data/sidekiq.gemspec +26 -23
- data/web/assets/images/apple-touch-icon.png +0 -0
- data/web/assets/javascripts/application.js +157 -61
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +192 -0
- data/web/assets/javascripts/dashboard.js +35 -283
- data/web/assets/javascripts/metrics.js +298 -0
- data/web/assets/stylesheets/application-dark.css +147 -0
- data/web/assets/stylesheets/application-rtl.css +10 -93
- data/web/assets/stylesheets/application.css +169 -522
- data/web/assets/stylesheets/bootstrap.css +2 -2
- data/web/locales/ar.yml +71 -64
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -53
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +86 -65
- data/web/locales/es.yml +70 -54
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +83 -62
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +75 -64
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +83 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +68 -63
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/tr.yml +101 -0
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +18 -3
- data/web/views/_job_info.erb +21 -4
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +4 -18
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +79 -29
- data/web/views/dashboard.erb +49 -19
- data/web/views/dead.erb +3 -3
- data/web/views/filtering.erb +7 -0
- data/web/views/layout.erb +9 -7
- data/web/views/metrics.erb +91 -0
- data/web/views/metrics_for_job.erb +59 -0
- data/web/views/morgue.erb +14 -15
- data/web/views/queue.erb +33 -23
- data/web/views/queues.erb +19 -5
- data/web/views/retries.erb +19 -16
- data/web/views/retry.erb +3 -3
- data/web/views/scheduled.erb +17 -15
- metadata +84 -129
- data/.github/contributing.md +0 -32
- data/.github/issue_template.md +0 -11
- data/.gitignore +0 -13
- data/.travis.yml +0 -14
- data/3.0-Upgrade.md +0 -70
- data/4.0-Upgrade.md +0 -53
- data/5.0-Upgrade.md +0 -56
- data/COMM-LICENSE +0 -95
- data/Ent-Changes.md +0 -216
- data/Gemfile +0 -8
- data/LICENSE +0 -9
- data/Pro-2.0-Upgrade.md +0 -138
- data/Pro-3.0-Upgrade.md +0 -44
- data/Pro-4.0-Upgrade.md +0 -35
- data/Pro-Changes.md +0 -729
- data/Rakefile +0 -8
- data/bin/sidekiqctl +0 -99
- data/code_of_conduct.md +0 -50
- data/lib/generators/sidekiq/worker_generator.rb +0 -49
- data/lib/sidekiq/core_ext.rb +0 -1
- data/lib/sidekiq/delay.rb +0 -42
- data/lib/sidekiq/exception_handler.rb +0 -29
- data/lib/sidekiq/extensions/action_mailer.rb +0 -57
- data/lib/sidekiq/extensions/active_record.rb +0 -40
- data/lib/sidekiq/extensions/class_methods.rb +0 -40
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -31
- data/lib/sidekiq/logging.rb +0 -122
- data/lib/sidekiq/middleware/server/active_record.rb +0 -23
- data/lib/sidekiq/util.rb +0 -66
- data/lib/sidekiq/worker.rb +0 -204
data/lib/sidekiq/metrics/query.rb
@@ -0,0 +1,158 @@
+# frozen_string_literal: true
+
+require "sidekiq"
+require "date"
+require "set"
+
+require "sidekiq/metrics/shared"
+
+module Sidekiq
+  module Metrics
+    # Allows caller to query for Sidekiq execution metrics within Redis.
+    # Caller sets a set of attributes to act as filters. {#fetch} will call
+    # Redis and return a Hash of results.
+    #
+    # NB: all metrics and times/dates are UTC only. We specifically do not
+    # support timezones.
+    class Query
+      def initialize(pool: nil, now: Time.now)
+        @time = now.utc
+        @pool = pool || Sidekiq.default_configuration.redis_pool
+        @klass = nil
+      end
+
+      # Get metric data for all jobs from the last hour
+      # +class_filter+: return only results for classes matching filter
+      def top_jobs(class_filter: nil, minutes: 60)
+        result = Result.new
+
+        time = @time
+        redis_results = @pool.with do |conn|
+          conn.pipelined do |pipe|
+            minutes.times do |idx|
+              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+              pipe.hgetall key
+              result.prepend_bucket time
+              time -= 60
+            end
+          end
+        end
+
+        time = @time
+        redis_results.each do |hash|
+          hash.each do |k, v|
+            kls, metric = k.split("|")
+            next if class_filter && !class_filter.match?(kls)
+            result.job_results[kls].add_metric metric, time, v.to_i
+          end
+          time -= 60
+        end
+
+        result.marks = fetch_marks(result.starts_at..result.ends_at)
+
+        result
+      end
+
+      def for_job(klass, minutes: 60)
+        result = Result.new
+
+        time = @time
+        redis_results = @pool.with do |conn|
+          conn.pipelined do |pipe|
+            minutes.times do |idx|
+              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+              pipe.hmget key, "#{klass}|ms", "#{klass}|p", "#{klass}|f"
+              result.prepend_bucket time
+              time -= 60
+            end
+          end
+        end
+
+        time = @time
+        @pool.with do |conn|
+          redis_results.each do |(ms, p, f)|
+            result.job_results[klass].add_metric "ms", time, ms.to_i if ms
+            result.job_results[klass].add_metric "p", time, p.to_i if p
+            result.job_results[klass].add_metric "f", time, f.to_i if f
+            result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time).reverse
+            time -= 60
+          end
+        end
+
+        result.marks = fetch_marks(result.starts_at..result.ends_at)
+
+        result
+      end
+
+      class Result < Struct.new(:starts_at, :ends_at, :size, :buckets, :job_results, :marks)
+        def initialize
+          super
+          self.buckets = []
+          self.marks = []
+          self.job_results = Hash.new { |h, k| h[k] = JobResult.new }
+        end
+
+        def prepend_bucket(time)
+          buckets.unshift time.strftime("%H:%M")
+          self.ends_at ||= time
+          self.starts_at = time
+        end
+      end
+
+      class JobResult < Struct.new(:series, :hist, :totals)
+        def initialize
+          super
+          self.series = Hash.new { |h, k| h[k] = Hash.new(0) }
+          self.hist = Hash.new { |h, k| h[k] = [] }
+          self.totals = Hash.new(0)
+        end
+
+        def add_metric(metric, time, value)
+          totals[metric] += value
+          series[metric][time.strftime("%H:%M")] += value
+
+          # Include timing measurements in seconds for convenience
+          add_metric("s", time, value / 1000.0) if metric == "ms"
+        end
+
+        def add_hist(time, hist_result)
+          hist[time.strftime("%H:%M")] = hist_result
+        end
+
+        def total_avg(metric = "ms")
+          completed = totals["p"] - totals["f"]
+          return 0 if completed.zero?
+          totals[metric].to_f / completed
+        end
+
+        def series_avg(metric = "ms")
+          series[metric].each_with_object(Hash.new(0)) do |(bucket, value), result|
+            completed = series.dig("p", bucket) - series.dig("f", bucket)
+            result[bucket] = (completed == 0) ? 0 : value.to_f / completed
+          end
+        end
+      end
+
+      class MarkResult < Struct.new(:time, :label)
+        def bucket
+          time.strftime("%H:%M")
+        end
+      end
+
+      private
+
+      def fetch_marks(time_range)
+        [].tap do |result|
+          marks = @pool.with { |c| c.hgetall("#{@time.strftime("%Y%m%d")}-marks") }
+
+          marks.each do |timestamp, label|
+            time = Time.parse(timestamp)
+            if time_range.cover? time
+              result << MarkResult.new(time, label)
+            end
+          end
+        end
+      end
+    end
+  end
+end
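For context, a rough sketch of how this new Query API could be driven from a console, assuming a Sidekiq 7.x process with the metrics middleware enabled and data already in Redis; the MyJob class name is a placeholder, not part of the diff:

require "sidekiq/metrics/query"

q = Sidekiq::Metrics::Query.new            # uses Sidekiq.default_configuration.redis_pool
overview = q.top_jobs(minutes: 30)         # Result struct covering the last 30 one-minute buckets
overview.job_results.each do |klass, jr|
  puts "#{klass}: #{jr.totals["p"]} processed, #{jr.totals["f"]} failed, " \
       "avg #{jr.total_avg("ms").round(1)}ms"
end

detail = q.for_job("MyJob")                # "MyJob" is a hypothetical job class
p detail.job_results["MyJob"].series_avg("ms")   # per-minute average runtime keyed by "HH:MM"

This is the same API the new metrics.erb and metrics_for_job.erb Web UI views use to render their charts.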
data/lib/sidekiq/metrics/shared.rb
@@ -0,0 +1,97 @@
+# frozen_string_literal: true
+
+require "concurrent"
+
+module Sidekiq
+  module Metrics
+    # This is the only dependency on concurrent-ruby in Sidekiq but it's
+    # mandatory for thread-safety until MRI supports atomic operations on values.
+    Counter = ::Concurrent::AtomicFixnum
+
+    # Implements space-efficient but statistically useful histogram storage.
+    # A precise time histogram stores every time. Instead we break times into a set of
+    # known buckets and increment counts of the associated time bucket. Even if we call
+    # the histogram a million times, we'll still only store 26 buckets.
+    # NB: needs to be thread-safe or resiliant to races.
+    #
+    # To store this data, we use Redis' BITFIELD command to store unsigned 16-bit counters
+    # per bucket per klass per minute. It's unlikely that most people will be executing more
+    # than 1000 job/sec for a full minute of a specific type.
+    class Histogram
+      include Enumerable
+
+      # This number represents the maximum milliseconds for this bucket.
+      # 20 means all job executions up to 20ms, e.g. if a job takes
+      # 280ms, it'll increment bucket[7]. Note we can track job executions
+      # up to about 5.5 minutes. After that, it's assumed you're probably
+      # not too concerned with its performance.
+      BUCKET_INTERVALS = [
+        20, 30, 45, 65, 100,
+        150, 225, 335, 500, 750,
+        1100, 1700, 2500, 3800, 5750,
+        8500, 13000, 20000, 30000, 45000,
+        65000, 100000, 150000, 225000, 335000,
+        1e20 # the "maybe your job is too long" bucket
+      ].freeze
+      LABELS = [
+        "20ms", "30ms", "45ms", "65ms", "100ms",
+        "150ms", "225ms", "335ms", "500ms", "750ms",
+        "1.1s", "1.7s", "2.5s", "3.8s", "5.75s",
+        "8.5s", "13s", "20s", "30s", "45s",
+        "65s", "100s", "150s", "225s", "335s",
+        "Slow"
+      ].freeze
+      FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \
+        GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \
+        GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \
+        GET u16 #12 GET u16 #13 GET u16 #14 GET u16 #15 \
+        GET u16 #16 GET u16 #17 GET u16 #18 GET u16 #19 \
+        GET u16 #20 GET u16 #21 GET u16 #22 GET u16 #23 \
+        GET u16 #24 GET u16 #25".split
+      HISTOGRAM_TTL = 8 * 60 * 60
+
+      def each
+        buckets.each { |counter| yield counter.value }
+      end
+
+      def label(idx)
+        LABELS[idx]
+      end
+
+      attr_reader :buckets
+      def initialize(klass)
+        @klass = klass
+        @buckets = Array.new(BUCKET_INTERVALS.size) { Counter.new }
+      end
+
+      def record_time(ms)
+        index_to_use = BUCKET_INTERVALS.each_index do |idx|
+          break idx if ms < BUCKET_INTERVALS[idx]
+        end
+
+        @buckets[index_to_use].increment
+      end
+
+      def fetch(conn, now = Time.now)
+        window = now.utc.strftime("%d-%H:%-M")
+        key = "#{@klass}-#{window}"
+        conn.bitfield_ro(key, *FETCH)
+      end
+
+      def persist(conn, now = Time.now)
+        buckets, @buckets = @buckets, []
+        window = now.utc.strftime("%d-%H:%-M")
+        key = "#{@klass}-#{window}"
+        cmd = [key, "OVERFLOW", "SAT"]
+        buckets.each_with_index do |counter, idx|
+          val = counter.value
+          cmd << "INCRBY" << "u16" << "##{idx}" << val.to_s if val > 0
+        end
+
+        conn.bitfield(*cmd) if cmd.size > 3
+        conn.expire(key, HISTOGRAM_TTL)
+        key
+      end
+    end
+  end
+end
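A short sketch of the Histogram lifecycle described above, assuming a reachable Redis; the MyJob class name and the console wiring are illustrative, not part of the diff:

require "sidekiq/metrics/shared"

h = Sidekiq::Metrics::Histogram.new("MyJob")   # "MyJob" is a placeholder class name
h.record_time(12)       # 12ms   -> bucket 0  ("20ms")
h.record_time(280)      # 280ms  -> bucket 7  ("335ms"), per the comment above
h.record_time(400_000)  # 400s   -> last bucket ("Slow")

Sidekiq.default_configuration.redis do |conn|
  key = h.persist(conn)    # BITFIELD INCRBY u16 per non-zero bucket, 8h TTL, then resets in-memory counts
  counts = h.fetch(conn)   # 26 integers back from Redis, one per bucket, in LABELS order
  counts.each_with_index { |c, i| puts "#{h.label(i)}: #{c}" if c.to_i > 0 }
end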
data/lib/sidekiq/metrics/tracking.rb
@@ -0,0 +1,148 @@
+# frozen_string_literal: true
+
+require "time"
+require "sidekiq"
+require "sidekiq/metrics/shared"
+
+# This file contains the components which track execution metrics within Sidekiq.
+module Sidekiq
+  module Metrics
+    class ExecutionTracker
+      include Sidekiq::Component
+
+      def initialize(config)
+        @config = config
+        @jobs = Hash.new(0)
+        @totals = Hash.new(0)
+        @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
+        @lock = Mutex.new
+      end
+
+      def track(queue, klass)
+        start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+        time_ms = 0
+        begin
+          begin
+            yield
+          ensure
+            finish = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+            time_ms = finish - start
+          end
+          # We don't track time for failed jobs as they can have very unpredictable
+          # execution times. more important to know average time for successful jobs so we
+          # can better recognize when a perf regression is introduced.
+          track_time(klass, time_ms)
+        rescue JobRetry::Skip
+          # This is raised when iterable job is interrupted.
+          track_time(klass, time_ms)
+          raise
+        rescue Exception
+          @lock.synchronize {
+            @jobs["#{klass}|f"] += 1
+            @totals["f"] += 1
+          }
+          raise
+        ensure
+          @lock.synchronize {
+            @jobs["#{klass}|p"] += 1
+            @totals["p"] += 1
+          }
+        end
+      end
+
+      # LONG_TERM = 90 * 24 * 60 * 60
+      # MID_TERM = 7 * 24 * 60 * 60
+      SHORT_TERM = 8 * 60 * 60
+
+      def flush(time = Time.now)
+        totals, jobs, grams = reset
+        procd = totals["p"]
+        fails = totals["f"]
+        return if procd == 0 && fails == 0
+
+        now = time.utc
+        # nowdate = now.strftime("%Y%m%d")
+        # nowhour = now.strftime("%Y%m%d|%-H")
+        nowmin = now.strftime("%Y%m%d|%-H:%-M")
+        count = 0
+
+        redis do |conn|
+          # persist fine-grained histogram data
+          if grams.size > 0
+            conn.pipelined do |pipe|
+              grams.each do |_, gram|
+                gram.persist(pipe, now)
+              end
+            end
+          end
+
+          # persist coarse grained execution count + execution millis.
+          # note as of today we don't use or do anything with the
+          # daily or hourly rollups.
+          [
+            # ["j", jobs, nowdate, LONG_TERM],
+            # ["j", jobs, nowhour, MID_TERM],
+            ["j", jobs, nowmin, SHORT_TERM]
+          ].each do |prefix, data, bucket, ttl|
+            conn.pipelined do |xa|
+              stats = "#{prefix}|#{bucket}"
+              data.each_pair do |key, value|
+                xa.hincrby stats, key, value
+                count += 1
+              end
+              xa.expire(stats, ttl)
+            end
+          end
+          logger.debug "Flushed #{count} metrics"
+          count
+        end
+      end
+
+      private
+
+      def track_time(klass, time_ms)
+        @lock.synchronize {
+          @grams[klass].record_time(time_ms)
+          @jobs["#{klass}|ms"] += time_ms
+          @totals["ms"] += time_ms
+        }
+      end
+
+      def reset
+        @lock.synchronize {
+          array = [@totals, @jobs, @grams]
+          reset_instance_variables
+          array
+        }
+      end
+
+      def reset_instance_variables
+        @totals = Hash.new(0)
+        @jobs = Hash.new(0)
+        @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
+      end
+    end
+
+    class Middleware
+      include Sidekiq::ServerMiddleware
+
+      def initialize(options)
+        @exec = options
+      end
+
+      def call(_instance, hash, queue, &block)
+        @exec.track(queue, hash["wrapped"] || hash["class"], &block)
+      end
+    end
+  end
+end
+
+Sidekiq.configure_server do |config|
+  exec = Sidekiq::Metrics::ExecutionTracker.new(config)
+  config.server_middleware do |chain|
+    chain.add Sidekiq::Metrics::Middleware, exec
+  end
+  config.on(:beat) do
+    exec.flush
+  end
+end
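As a rough illustration of the Redis layout the flush above produces, a hypothetical read-back sketch; the key formats come from the code (the "j|" prefix, nowmin with no leading zeros, "KLASS|ms/p/f" fields), while the class name, date and counts are invented:

Sidekiq.default_configuration.redis do |conn|
  conn.hgetall("j|20240905|14:3")   # UTC minute 14:03 on 2024-09-05 ("%-H:%-M" drops leading zeros)
  # => { "MyJob|ms" => "750",   # summed runtime of successful runs, in ms
  #      "MyJob|p"  => "3",     # attempts, counted in the ensure block
  #      "MyJob|f"  => "1" }    # failures, counted in the rescue block
  conn.ttl("j|20240905|14:3")       # roughly SHORT_TERM (8 hours) after the last flush
end

Query#top_jobs and #for_job read these per-minute hashes back, while the per-bucket Histogram counts live separately under the "KLASS-<day>-<hour>:<min>" BITFIELD keys shown in shared.rb.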
data/lib/sidekiq/middleware/chain.rb
@@ -1,117 +1,160 @@
 # frozen_string_literal: true
+
+require "sidekiq/middleware/modules"
+
 module Sidekiq
   # Middleware is code configured to run before/after
-  # a
+  # a job is processed. It is patterned after Rack
   # middleware. Middleware exists for the client side
   # (pushing jobs onto the queue) as well as the server
   # side (when jobs are actually processed).
   #
+  # Callers will register middleware Classes and Sidekiq will
+  # create new instances of the middleware for every job. This
+  # is important so that instance state is not shared accidentally
+  # between job executions.
+  #
   # To add middleware for the client:
   #
-  #
-  #
-  #
+  #   Sidekiq.configure_client do |config|
+  #     config.client_middleware do |chain|
+  #       chain.add MyClientHook
+  #     end
   #   end
-  # end
   #
   # To modify middleware for the server, just call
   # with another block:
   #
-  #
-  #
-  #
-  #
+  #   Sidekiq.configure_server do |config|
+  #     config.server_middleware do |chain|
+  #       chain.add MyServerHook
+  #       chain.remove ActiveRecord
+  #     end
   #   end
-  # end
   #
   # To insert immediately preceding another entry:
   #
-  #
-  #
-  #
+  #   Sidekiq.configure_client do |config|
+  #     config.client_middleware do |chain|
+  #       chain.insert_before ActiveRecord, MyClientHook
+  #     end
   #   end
-  # end
   #
   # To insert immediately after another entry:
   #
-  #
-  #
-  #
+  #   Sidekiq.configure_client do |config|
+  #     config.client_middleware do |chain|
+  #       chain.insert_after ActiveRecord, MyClientHook
+  #     end
   #   end
-  # end
   #
   # This is an example of a minimal server middleware:
   #
-  #
-  #
-  #
-  #
-  #
+  #   class MyServerHook
+  #     include Sidekiq::ServerMiddleware
+  #
+  #     def call(job_instance, msg, queue)
+  #       logger.info "Before job"
+  #       redis {|conn| conn.get("foo") } # do something in Redis
+  #       yield
+  #       logger.info "After job"
+  #     end
   #   end
-  # end
   #
   # This is an example of a minimal client middleware, note
   # the method must return the result or the job will not push
   # to Redis:
   #
-  #
-  #
-  #
-  #
-  #
-  #
+  #   class MyClientHook
+  #     include Sidekiq::ClientMiddleware
+  #
+  #     def call(job_class, msg, queue, redis_pool)
+  #       logger.info "Before push"
+  #       result = yield
+  #       logger.info "After push"
+  #       result
+  #     end
   #   end
-  # end
   #
   module Middleware
     class Chain
       include Enumerable
-      attr_reader :entries
-
-      def initialize_copy(copy)
-        copy.instance_variable_set(:@entries, entries.dup)
-      end

+      # Iterate through each middleware in the chain
      def each(&block)
        entries.each(&block)
      end

-
-
+      # @api private
+      def initialize(config = nil) # :nodoc:
+        @config = config
+        @entries = nil
        yield self if block_given?
      end

+      def entries
+        @entries ||= []
+      end
+
+      def copy_for(capsule)
+        chain = Sidekiq::Middleware::Chain.new(capsule)
+        chain.instance_variable_set(:@entries, entries.dup)
+        chain
+      end
+
+      # Remove all middleware matching the given Class
+      # @param klass [Class]
      def remove(klass)
        entries.delete_if { |entry| entry.klass == klass }
      end

+      # Add the given middleware to the end of the chain.
+      # Sidekiq will call `klass.new(*args)` to create a clean
+      # copy of your middleware for every job executed.
+      #
+      #   chain.add(Statsd::Metrics, { collector: "localhost:8125" })
+      #
+      # @param klass [Class] Your middleware class
+      # @param *args [Array<Object>] Set of arguments to pass to every instance of your middleware
      def add(klass, *args)
-        remove(klass)
-        entries << Entry.new(klass, *args)
+        remove(klass)
+        entries << Entry.new(@config, klass, *args)
      end

+      # Identical to {#add} except the middleware is added to the front of the chain.
      def prepend(klass, *args)
-        remove(klass)
-        entries.insert(0, Entry.new(klass, *args))
+        remove(klass)
+        entries.insert(0, Entry.new(@config, klass, *args))
      end

+      # Inserts +newklass+ before +oldklass+ in the chain.
+      # Useful if one middleware must run before another middleware.
      def insert_before(oldklass, newklass, *args)
        i = entries.index { |entry| entry.klass == newklass }
-        new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
+        new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
        i = entries.index { |entry| entry.klass == oldklass } || 0
        entries.insert(i, new_entry)
      end

+      # Inserts +newklass+ after +oldklass+ in the chain.
+      # Useful if one middleware must run after another middleware.
      def insert_after(oldklass, newklass, *args)
        i = entries.index { |entry| entry.klass == newklass }
-        new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
+        new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
        i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1
-        entries.insert(i+1, new_entry)
+        entries.insert(i + 1, new_entry)
      end

+      # @return [Boolean] if the given class is already in the chain
      def exists?(klass)
        any? { |entry| entry.klass == klass }
      end
+      alias_method :include?, :exists?
+
+      # @return [Boolean] if the chain contains no middleware
+      def empty?
+        @entries.nil? || @entries.empty?
+      end

      def retrieve
        map(&:make_new)
@@ -121,29 +164,43 @@ module Sidekiq
        entries.clear
      end

-
-
-
-
-
-
-
+      # Used by Sidekiq to execute the middleware at runtime
+      # @api private
+      def invoke(*args, &block)
+        return yield if empty?
+
+        chain = retrieve
+        traverse(chain, 0, args, &block)
+      end
+
+      private
+
+      def traverse(chain, index, args, &block)
+        if index >= chain.size
+          yield
+        else
+          chain[index].call(*args) do
+            traverse(chain, index + 1, args, &block)
          end
        end
-        traverse_chain.call
      end
    end

+    # Represents each link in the middleware chain
+    # @api private
    class Entry
      attr_reader :klass

-      def initialize(klass, *args)
+      def initialize(config, klass, *args)
+        @config = config
        @klass = klass
-        @args
+        @args = args
      end

      def make_new
-        x = @klass.new(*@args)
+        x = @klass.new(*@args)
+        x.config = @config if @config && x.respond_to?(:config=)
+        x
      end
    end
  end
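A minimal sketch of registering a server middleware with arguments against this chain API, e.g. from a Rails initializer; the TagTimingMiddleware class and its option hash are hypothetical, while logger comes from including Sidekiq::ServerMiddleware as shown in the doc comments above:

class TagTimingMiddleware
  include Sidekiq::ServerMiddleware   # gives access to logger/redis and lets the chain inject config

  def initialize(options = {})
    @prefix = options.fetch(:prefix, "jobs")
  end

  def call(job_instance, job_payload, queue)
    started = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    yield
  ensure
    elapsed_ms = ((Process.clock_gettime(Process::CLOCK_MONOTONIC) - started) * 1000).round
    logger.info "#{@prefix}.#{job_payload["class"]} took #{elapsed_ms}ms on #{queue}"
  end
end

Sidekiq.configure_server do |config|
  config.server_middleware do |chain|
    chain.add TagTimingMiddleware, {prefix: "myapp.jobs"}
  end
end

The extra arguments are stored on the Entry and passed to klass.new for every job, and Entry#make_new now also hands the capsule config to the fresh instance via config= when the middleware includes the module.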