sidekiq 7.3.9 → 8.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Changes.md +44 -0
- data/README.md +16 -13
- data/bin/sidekiqload +10 -10
- data/bin/webload +69 -0
- data/lib/active_job/queue_adapters/sidekiq_adapter.rb +5 -5
- data/lib/sidekiq/api.rb +120 -36
- data/lib/sidekiq/capsule.rb +6 -6
- data/lib/sidekiq/cli.rb +15 -19
- data/lib/sidekiq/client.rb +13 -16
- data/lib/sidekiq/component.rb +40 -2
- data/lib/sidekiq/config.rb +18 -15
- data/lib/sidekiq/embedded.rb +1 -0
- data/lib/sidekiq/iterable_job.rb +1 -0
- data/lib/sidekiq/job/iterable.rb +13 -4
- data/lib/sidekiq/job_logger.rb +4 -4
- data/lib/sidekiq/job_retry.rb +17 -5
- data/lib/sidekiq/job_util.rb +5 -1
- data/lib/sidekiq/launcher.rb +1 -1
- data/lib/sidekiq/logger.rb +19 -70
- data/lib/sidekiq/manager.rb +0 -1
- data/lib/sidekiq/metrics/query.rb +71 -45
- data/lib/sidekiq/metrics/shared.rb +8 -5
- data/lib/sidekiq/metrics/tracking.rb +9 -7
- data/lib/sidekiq/middleware/current_attributes.rb +5 -17
- data/lib/sidekiq/paginator.rb +8 -1
- data/lib/sidekiq/processor.rb +21 -14
- data/lib/sidekiq/profiler.rb +59 -0
- data/lib/sidekiq/redis_client_adapter.rb +0 -1
- data/lib/sidekiq/redis_connection.rb +14 -3
- data/lib/sidekiq/testing.rb +2 -2
- data/lib/sidekiq/version.rb +2 -2
- data/lib/sidekiq/web/action.rb +104 -84
- data/lib/sidekiq/web/application.rb +347 -332
- data/lib/sidekiq/web/config.rb +117 -0
- data/lib/sidekiq/web/helpers.rb +41 -16
- data/lib/sidekiq/web/router.rb +60 -76
- data/lib/sidekiq/web.rb +50 -156
- data/lib/sidekiq.rb +1 -1
- data/sidekiq.gemspec +6 -6
- data/web/assets/javascripts/application.js +6 -13
- data/web/assets/javascripts/base-charts.js +30 -16
- data/web/assets/javascripts/chartjs-adapter-date-fns.min.js +7 -0
- data/web/assets/javascripts/metrics.js +16 -34
- data/web/assets/stylesheets/style.css +750 -0
- data/web/locales/ar.yml +1 -0
- data/web/locales/cs.yml +1 -0
- data/web/locales/da.yml +1 -0
- data/web/locales/de.yml +1 -0
- data/web/locales/el.yml +1 -0
- data/web/locales/en.yml +6 -0
- data/web/locales/es.yml +24 -2
- data/web/locales/fa.yml +1 -0
- data/web/locales/fr.yml +1 -0
- data/web/locales/gd.yml +1 -0
- data/web/locales/he.yml +1 -0
- data/web/locales/hi.yml +1 -0
- data/web/locales/it.yml +1 -0
- data/web/locales/ja.yml +1 -0
- data/web/locales/ko.yml +1 -0
- data/web/locales/lt.yml +1 -0
- data/web/locales/nb.yml +1 -0
- data/web/locales/nl.yml +1 -0
- data/web/locales/pl.yml +1 -0
- data/web/locales/{pt-br.yml → pt-BR.yml} +2 -1
- data/web/locales/pt.yml +1 -0
- data/web/locales/ru.yml +1 -0
- data/web/locales/sv.yml +1 -0
- data/web/locales/ta.yml +1 -0
- data/web/locales/tr.yml +1 -0
- data/web/locales/uk.yml +1 -0
- data/web/locales/ur.yml +1 -0
- data/web/locales/vi.yml +1 -0
- data/web/locales/{zh-cn.yml → zh-CN.yml} +85 -73
- data/web/locales/{zh-tw.yml → zh-TW.yml} +2 -1
- data/web/views/_footer.erb +31 -33
- data/web/views/_job_info.erb +91 -89
- data/web/views/_metrics_period_select.erb +13 -10
- data/web/views/_nav.erb +14 -21
- data/web/views/_paging.erb +23 -21
- data/web/views/_poll_link.erb +2 -2
- data/web/views/_summary.erb +16 -16
- data/web/views/busy.erb +124 -122
- data/web/views/dashboard.erb +62 -66
- data/web/views/dead.erb +31 -27
- data/web/views/filtering.erb +3 -3
- data/web/views/layout.erb +6 -22
- data/web/views/metrics.erb +75 -81
- data/web/views/metrics_for_job.erb +45 -46
- data/web/views/morgue.erb +61 -70
- data/web/views/profiles.erb +43 -0
- data/web/views/queue.erb +54 -52
- data/web/views/queues.erb +43 -41
- data/web/views/retries.erb +66 -75
- data/web/views/retry.erb +32 -27
- data/web/views/scheduled.erb +58 -54
- data/web/views/scheduled_job_info.erb +1 -1
- metadata +24 -24
- data/web/assets/stylesheets/application-dark.css +0 -147
- data/web/assets/stylesheets/application-rtl.css +0 -163
- data/web/assets/stylesheets/application.css +0 -759
- data/web/assets/stylesheets/bootstrap-rtl.min.css +0 -9
- data/web/assets/stylesheets/bootstrap.css +0 -5
- data/web/views/_status.erb +0 -4
data/lib/sidekiq/metrics/query.rb
CHANGED
@@ -1,9 +1,7 @@
 # frozen_string_literal: true
 
-require "sidekiq"
 require "date"
-require "set"
-
+require "sidekiq"
 require "sidekiq/metrics/shared"
 
 module Sidekiq
@@ -12,7 +10,7 @@ module Sidekiq
     # Caller sets a set of attributes to act as filters. {#fetch} will call
     # Redis and return a Hash of results.
     #
-    # NB: all metrics and times/dates are UTC only. We specifically do not
+    # NB: all metrics and times/dates are UTC only. We explicitly do not
     # support timezones.
     class Query
       def initialize(pool: nil, now: Time.now)
@@ -21,23 +19,46 @@ module Sidekiq
         @klass = nil
       end
 
+      ROLLUPS = {
+        # minutely aggregates per minute
+        minutely: [60, ->(time) { time.strftime("j|%y%m%d|%-H:%M") }],
+        # hourly aggregates every 10 minutes so we'll have six data points per hour
+        hourly: [600, ->(time) {
+          m = time.min
+          mins = (m < 10) ? "0" : m.to_s[0]
+          time.strftime("j|%y%m%d|%-H:#{mins}")
+        }]
+      }
+
       # Get metric data for all jobs from the last hour
       # +class_filter+: return only results for classes matching filter
-      def top_jobs(class_filter: nil, minutes: 60)
-        result = Result.new
-
+      # +minutes+: the number of fine-grained minute buckets to retrieve
+      # +hours+: the number of coarser-grained 10-minute buckets to retrieve, in hours
+      def top_jobs(class_filter: nil, minutes: nil, hours: nil)
         time = @time
+        minutes = 60 unless minutes || hours
+
+        # DoS protection, sanity check
+        minutes = 60 if minutes && minutes > 480
+        hours = 72 if hours && hours > 72
+
+        granularity = hours ? :hourly : :minutely
+        result = Result.new(granularity)
+        result.ends_at = time
+        count = hours ? hours * 6 : minutes
+        stride, keyproc = ROLLUPS[granularity]
+
         redis_results = @pool.with do |conn|
           conn.pipelined do |pipe|
-            minutes.times do |idx|
-              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+            count.times do |idx|
+              key = keyproc.call(time)
               pipe.hgetall key
-              result.prepend_bucket time
-              time -= 60
+              time -= stride
             end
           end
         end
 
+        result.starts_at = time
         time = @time
         redis_results.each do |hash|
           hash.each do |k, v|
@@ -45,63 +66,66 @@ module Sidekiq
             next if class_filter && !class_filter.match?(kls)
             result.job_results[kls].add_metric metric, time, v.to_i
           end
-          time -= 60
+          time -= stride
         end
 
-        result.marks = fetch_marks(result.starts_at..result.ends_at)
-
+        result.marks = fetch_marks(result.starts_at..result.ends_at, granularity)
         result
       end
 
-      def for_job(klass, minutes: 60)
-        result = Result.new
-
+      def for_job(klass, minutes: nil, hours: nil)
         time = @time
+        minutes = 60 unless minutes || hours
+
+        # DoS protection, sanity check
+        minutes = 60 if minutes && minutes > 480
+        hours = 72 if hours && hours > 72
+
+        granularity = hours ? :hourly : :minutely
+        result = Result.new(granularity)
+        result.ends_at = time
+        count = hours ? hours * 6 : minutes
+        stride, keyproc = ROLLUPS[granularity]
+
        redis_results = @pool.with do |conn|
           conn.pipelined do |pipe|
-            minutes.times do |idx|
-              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+            count.times do |idx|
+              key = keyproc.call(time)
               pipe.hmget key, "#{klass}|ms", "#{klass}|p", "#{klass}|f"
-              result.prepend_bucket time
-              time -= 60
+              time -= stride
             end
           end
         end
 
+        result.starts_at = time
         time = @time
         @pool.with do |conn|
           redis_results.each do |(ms, p, f)|
             result.job_results[klass].add_metric "ms", time, ms.to_i if ms
             result.job_results[klass].add_metric "p", time, p.to_i if p
             result.job_results[klass].add_metric "f", time, f.to_i if f
-            result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time).reverse
-            time -= 60
+            result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time).reverse if minutes
+            time -= stride
           end
         end
 
-        result.marks = fetch_marks(result.starts_at..result.ends_at)
-
+        result.marks = fetch_marks(result.starts_at..result.ends_at, granularity)
         result
       end
 
-      class Result < Struct.new(:starts_at, :ends_at, :size, :buckets, :job_results, :marks)
-        def initialize
+      class Result < Struct.new(:granularity, :starts_at, :ends_at, :size, :job_results, :marks)
+        def initialize(granularity = :minutely)
           super
-          self.buckets = []
+          self.granularity = granularity
           self.marks = []
-          self.job_results = Hash.new { |h, k| h[k] = JobResult.new }
-        end
-
-        def prepend_bucket(time)
-          buckets.unshift time.strftime("%H:%M")
-          self.ends_at ||= time
-          self.starts_at = time
+          self.job_results = Hash.new { |h, k| h[k] = JobResult.new(granularity) }
         end
       end
 
-      class JobResult < Struct.new(:series, :hist, :totals)
-        def initialize
+      class JobResult < Struct.new(:granularity, :series, :hist, :totals)
+        def initialize(granularity = :minutely)
           super
+          self.granularity = granularity
           self.series = Hash.new { |h, k| h[k] = Hash.new(0) }
           self.hist = Hash.new { |h, k| h[k] = [] }
           self.totals = Hash.new(0)
@@ -109,14 +133,14 @@ module Sidekiq
 
         def add_metric(metric, time, value)
           totals[metric] += value
-          series[metric][time.strftime("%H:%M")] += value
+          series[metric][Query.bkt_time_s(time, granularity)] += value
 
           # Include timing measurements in seconds for convenience
           add_metric("s", time, value / 1000.0) if metric == "ms"
         end
 
         def add_hist(time, hist_result)
-          hist[time.strftime("%H:%M")] = hist_result
+          hist[Query.bkt_time_s(time, granularity)] = hist_result
         end
 
         def total_avg(metric = "ms")
@@ -133,22 +157,24 @@ module Sidekiq
         end
       end
 
-      class MarkResult < Struct.new(:time, :label)
-        def bucket
-          time.strftime("%H:%M")
-        end
+      MarkResult = Struct.new(:time, :label, :bucket)
+
+      def self.bkt_time_s(time, granularity)
+        # truncate time to ten minutes ("8:40", not "8:43") or one minute
+        truncation = (granularity == :hourly) ? 600 : 60
+        Time.at(time.to_i - time.to_i % truncation).utc.iso8601
       end
 
       private
 
-      def fetch_marks(time_range)
+      def fetch_marks(time_range, granularity)
        [].tap do |result|
           marks = @pool.with { |c| c.hgetall("#{@time.strftime("%Y%m%d")}-marks") }
 
           marks.each do |timestamp, label|
             time = Time.parse(timestamp)
             if time_range.cover? time
-              result << MarkResult.new(time, label)
+              result << MarkResult.new(time, label, Query.bkt_time_s(time, granularity))
             end
           end
         end
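Note: a minimal sketch of the reworked Query API above, run against a live Redis holding Sidekiq 8 metrics data; the return values in comments are illustrative. top_jobs/for_job now take either minutes: (one-minute buckets, capped at 480) or hours: (ten-minute rollup buckets, capped at 72), and bucket labels come from the public Query.bkt_time_s helper:

require "sidekiq/metrics/query"

q = Sidekiq::Metrics::Query.new
hour = q.top_jobs(minutes: 60)   # 60 one-minute buckets
days = q.top_jobs(hours: 72)     # 72 * 6 = 432 ten-minute buckets
days.granularity                 # => :hourly

t = Time.utc(2025, 2, 14, 8, 43, 17)
Sidekiq::Metrics::Query.bkt_time_s(t, :minutely) # => "2025-02-14T08:43:00Z"
Sidekiq::Metrics::Query.bkt_time_s(t, :hourly)   # => "2025-02-14T08:40:00Z"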
data/lib/sidekiq/metrics/shared.rb
CHANGED
@@ -25,7 +25,10 @@ module Sidekiq
     #
     # To store this data, we use Redis' BITFIELD command to store unsigned 16-bit counters
     # per bucket per klass per minute. It's unlikely that most people will be executing more
-    # than 1000 job/sec for a full minute of a specific type.
+    # than 1000 job/sec for a full minute of a specific type (i.e. overflow 65,536).
+    #
+    # Histograms are only stored at the fine-grained level, they are not rolled up
+    # for longer-term buckets.
     class Histogram
       include Enumerable
 
@@ -82,15 +85,15 @@ module Sidekiq
       end
 
       def fetch(conn, now = Time.now)
-        window = now.utc.strftime("%d-%H:%-M")
-        key = "#{@klass}-#{window}"
+        window = now.utc.strftime("%-d-%-H:%-M")
+        key = "h|#{@klass}-#{window}"
         conn.bitfield_ro(key, *FETCH)
       end
 
       def persist(conn, now = Time.now)
         buckets, @buckets = @buckets, []
-        window = now.utc.strftime("%d-%H:%-M")
-        key = "#{@klass}-#{window}"
+        window = now.utc.strftime("%-d-%-H:%-M")
+        key = "h|#{@klass}-#{window}"
         cmd = [key, "OVERFLOW", "SAT"]
         buckets.each_with_index do |counter, idx|
           val = counter.value
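Note: the new h| prefix and day-hour-minute window give histograms their own key namespace, one key per class per minute. A quick sketch of the key a fetch/persist pair above would touch (SomeJob is illustrative):

now = Time.utc(2025, 2, 14, 8, 43)
window = now.strftime("%-d-%-H:%-M") # => "14-8:43"
key = "h|SomeJob-#{window}"          # => "h|SomeJob-14-8:43"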
data/lib/sidekiq/metrics/tracking.rb
CHANGED
@@ -19,13 +19,13 @@ module Sidekiq
       end
 
       def track(queue, klass)
-        start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+        start = mono_ms
         time_ms = 0
         begin
           begin
             yield
           ensure
-            finish = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+            finish = mono_ms
             time_ms = finish - start
           end
           # We don't track time for failed jobs as they can have very unpredictable
@@ -51,7 +51,7 @@ module Sidekiq
       end
 
       # LONG_TERM = 90 * 24 * 60 * 60
-      # MID_TERM = 7 * 24 * 60 * 60
+      MID_TERM = 3 * 24 * 60 * 60
       SHORT_TERM = 8 * 60 * 60
 
       def flush(time = Time.now)
|
@@ -62,8 +62,10 @@ module Sidekiq
|
|
62
62
|
|
63
63
|
now = time.utc
|
64
64
|
# nowdate = now.strftime("%Y%m%d")
|
65
|
-
#
|
66
|
-
|
65
|
+
# "250214|8:4" is the 10 minute bucket for Feb 14 2025, 08:43
|
66
|
+
nowmid = now.strftime("%y%m%d|%-H:%M")[0..-2]
|
67
|
+
# "250214|8:43" is the 1 minute bucket for Feb 14 2025, 08:43
|
68
|
+
nowshort = now.strftime("%y%m%d|%-H:%M")
|
67
69
|
count = 0
|
68
70
|
|
69
71
|
redis do |conn|
|
@@ -81,8 +83,8 @@ module Sidekiq
|
|
81
83
|
# daily or hourly rollups.
|
82
84
|
[
|
83
85
|
# ["j", jobs, nowdate, LONG_TERM],
|
84
|
-
|
85
|
-
["j", jobs,
|
86
|
+
["j", jobs, nowmid, MID_TERM],
|
87
|
+
["j", jobs, nowshort, SHORT_TERM]
|
86
88
|
].each do |prefix, data, bucket, ttl|
|
87
89
|
conn.pipelined do |xa|
|
88
90
|
stats = "#{prefix}|#{bucket}"
|
@@ -1,5 +1,6 @@
|
|
1
1
|
# frozen_string_literal: true
|
2
2
|
|
3
|
+
require "active_job/arguments"
|
3
4
|
require "active_support/current_attributes"
|
4
5
|
|
5
6
|
module Sidekiq
|
@@ -20,6 +21,8 @@ module Sidekiq
   # Sidekiq::CurrentAttributes.persist(["Myapp::Current", "Myapp::OtherCurrent"])
   #
   module CurrentAttributes
+    Serializer = ::ActiveJob::Arguments
+
     class Save
       include Sidekiq::ClientMiddleware
 
@@ -33,26 +36,11 @@ module Sidekiq
             attrs = strklass.constantize.attributes
             # Retries can push the job N times, we don't
            # want retries to reset cattr. #5692, #5090
-            if attrs.any?
-              # Older rails has a bug that `CurrentAttributes#attributes` always returns
-              # the same hash instance. We need to dup it to avoid being accidentally mutated.
-              job[key] = if returns_same_object?
-                attrs.dup
-              else
-                attrs
-              end
-            end
+            job[key] = Serializer.serialize(attrs) if attrs.any?
           end
         end
         yield
       end
-
-      private
-
-      def returns_same_object?
-        ActiveSupport::VERSION::MAJOR < 8 ||
-          (ActiveSupport::VERSION::MAJOR == 8 && ActiveSupport::VERSION::MINOR == 0)
-      end
     end
 
     class Load
@@ -68,7 +56,7 @@ module Sidekiq
         @cattrs.each do |(key, strklass)|
           next unless job.has_key?(key)
 
-          klass_attrs[strklass.constantize] = job[key]
+          klass_attrs[strklass.constantize] = Serializer.deserialize(job[key]).to_h
         end
 
         wrap(klass_attrs.to_a, &block)
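Note: the persist API itself is unchanged; what changes is the wire format, since attributes now round-trip through ActiveJob::Arguments. A minimal sketch assuming a Rails app (Myapp::Current is illustrative); attribute values must be types ActiveJob can serialize (basic JSON types, GlobalID-capable records):

require "sidekiq/middleware/current_attributes"

module Myapp
  class Current < ActiveSupport::CurrentAttributes
    attribute :tenant_id
  end
end

# Registers the Save client middleware and Load server middleware shown above.
Sidekiq::CurrentAttributes.persist(["Myapp::Current"])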
data/lib/sidekiq/paginator.rb
CHANGED
@@ -17,7 +17,14 @@ module Sidekiq
       ending = starting + page_size - 1
 
       Sidekiq.redis do |conn|
-        type = conn.type(key)
+        # horrible, think you can make this cleaner?
+        type = TYPE_CACHE[key]
+        if type
+        elsif key.start_with?("queue:")
+          type = TYPE_CACHE[key] = "list"
+        else
+          type = TYPE_CACHE[key] = conn.type(key)
+        end
         rev = opts && opts[:reverse]
 
         case type
data/lib/sidekiq/processor.rb
CHANGED
@@ -3,6 +3,7 @@
 require "sidekiq/fetch"
 require "sidekiq/job_logger"
 require "sidekiq/job_retry"
+require "sidekiq/profiler"
 
 module Sidekiq
   ##
@@ -66,7 +67,7 @@ module Sidekiq
       @thread ||= safe_thread("#{config.name}/processor", &method(:run))
     end
 
-    private unless $TESTING
+    private
 
     def run
       # By setting this thread-local, Sidekiq.redis will access +Sidekiq::Capsule#redis_pool+
@@ -112,13 +113,17 @@ module Sidekiq
     def handle_fetch_exception(ex)
       unless @down
         @down = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-        logger.error("Error fetching job: #{ex}")
         handle_exception(ex)
       end
       sleep(1)
       nil
     end
 
+    def profile(job, &block)
+      return yield unless job["profile"]
+      Sidekiq::Profiler.new(config).call(job, &block)
+    end
+
     def dispatch(job_hash, queue, jobstr)
       # since middleware can mutate the job hash
       # we need to clone it to report the original
@@ -132,17 +137,19 @@ module Sidekiq
       @retrier.global(jobstr, queue) do
         @job_logger.call(job_hash, queue) do
           stats(jobstr, queue) do
-            # Rails 5 requires a Reloader to wrap code execution. In order to
-            # constantize the worker and instantiate an instance, we have to call
-            # the Reloader. It handles code loading, db connection management, etc.
-            # Effectively this block denotes a "unit of work" to Rails.
-            @reloader.call do
-              klass = Object.const_get(job_hash["class"])
-              instance = klass.new
-              instance.jid = job_hash["jid"]
-              instance._context = self
-              @retrier.local(instance, jobstr, queue) do
-                yield instance
+            profile(job_hash) do
+              # Rails 5 requires a Reloader to wrap code execution. In order to
+              # constantize the worker and instantiate an instance, we have to call
+              # the Reloader. It handles code loading, db connection management, etc.
+              # Effectively this block denotes a "unit of work" to Rails.
+              @reloader.call do
+                klass = Object.const_get(job_hash["class"])
+                instance = klass.new
+                instance.jid = job_hash["jid"]
+                instance._context = self
+                @retrier.local(instance, jobstr, queue) do
+                  yield instance
+                end
               end
             end
           end
@@ -165,7 +172,6 @@ module Sidekiq
       begin
         job_hash = Sidekiq.load_json(jobstr)
       rescue => ex
-        handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
         now = Time.now.to_f
         redis do |conn|
           conn.multi do |xa|
@@ -174,6 +180,7 @@ module Sidekiq
             xa.zremrangebyrank("dead", 0, -@capsule.config[:dead_max_jobs])
           end
         end
+        handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
         return uow.acknowledge
       end
 
data/lib/sidekiq/profiler.rb
ADDED
@@ -0,0 +1,59 @@
+require "fileutils"
+require "sidekiq/component"
+
+module Sidekiq
+  # Allows the user to profile jobs running in production.
+  # See details in the Profiling wiki page.
+  class Profiler
+    EXPIRY = 86400 # 1 day
+    DEFAULT_OPTIONS = {
+      mode: :wall
+    }
+
+    include Sidekiq::Component
+    def initialize(config)
+      @config = config
+    end
+
+    def call(job, &block)
+      return yield unless job["profile"]
+
+      token = job["profile"]
+      type = job["class"]
+      jid = job["jid"]
+      started_at = Time.now
+      options = DEFAULT_OPTIONS.merge((job["profiler_options"] || {}).transform_keys!(&:to_sym))
+
+      rundata = {
+        started_at: started_at.to_i,
+        token: token,
+        type: type,
+        jid: jid,
+        # .gz extension tells Vernier to compress the data
+        filename: "#{token}-#{type}-#{jid}-#{started_at.strftime("%Y%m%d-%H%M%S")}.json.gz"
+      }
+
+      require "vernier"
+      begin
+        a = Time.now
+        rc = Vernier.profile(**options.merge(out: rundata[:filename]), &block)
+        b = Time.now
+
+        # Failed jobs will raise an exception on previous line and skip this
+        # block. Only successful jobs will persist profile data to Redis.
+        key = "#{token}-#{jid}"
+        data = File.read(rundata[:filename])
+        redis do |conn|
+          conn.multi do |m|
+            m.zadd("profiles", Time.now.to_f + EXPIRY, key)
+            m.hset(key, rundata.merge(elapsed: (b - a), data: data, size: data.bytesize))
+            m.expire(key, EXPIRY)
+          end
+        end
+        rc
+      ensure
+        FileUtils.rm_f(rundata[:filename])
+      end
+    end
+  end
+end
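Note: a hedged sketch of triggering the profiler from code rather than the Web UI. Profiler#call only engages when the payload carries a "profile" token, so a low-level client_push with that key is enough; the vernier gem must be installed, and the job class and token here are illustrative:

require "sidekiq"

class SomeJob
  include Sidekiq::Job
  def perform = sleep(1)
end

# "profile" is the token Processor#profile and Profiler#call check for.
SomeJob.client_push("class" => SomeJob, "args" => [], "profile" => "abc123")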
data/lib/sidekiq/redis_connection.rb
CHANGED
@@ -10,6 +10,8 @@ module Sidekiq
     def create(options = {})
       symbolized_options = deep_symbolize_keys(options)
       symbolized_options[:url] ||= determine_redis_provider
+      symbolized_options[:password] = wrap(symbolized_options[:password]) if symbolized_options.key?(:password)
+      symbolized_options[:sentinel_password] = wrap(symbolized_options[:sentinel_password]) if symbolized_options.key?(:sentinel_password)
 
       logger = symbolized_options.delete(:logger)
       logger&.info { "Sidekiq #{Sidekiq::VERSION} connecting to Redis with options #{scrub(symbolized_options)}" }
@@ -38,6 +40,15 @@ module Sidekiq
 
     private
 
+    # Wrap hard-coded passwords in a Proc to avoid logging the value
+    def wrap(pwd)
+      if pwd.is_a?(String)
+        ->(username) { pwd }
+      else
+        pwd
+      end
+    end
+
     def deep_symbolize_keys(object)
       case object
       when Hash
@@ -57,14 +68,14 @@ module Sidekiq
       # Deep clone so we can muck with these options all we want and exclude
       # params from dump-and-load that may contain objects that Marshal is
       # unable to safely dump.
-      keys = options.keys - [:logger, :ssl_params]
+      keys = options.keys - [:logger, :ssl_params, :password, :sentinel_password]
       scrubbed_options = Marshal.load(Marshal.dump(options.slice(*keys)))
       if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
         uri.password = redacted
         scrubbed_options[:url] = uri.to_s
       end
-      scrubbed_options[:password] = redacted if scrubbed_options[:password]
-      scrubbed_options[:sentinel_password] = redacted if scrubbed_options[:sentinel_password]
+      scrubbed_options[:password] = redacted if options.key?(:password)
+      scrubbed_options[:sentinel_password] = redacted if options.key?(:sentinel_password)
       scrubbed_options[:sentinels]&.each do |sentinel|
         if sentinel.is_a?(String)
           if (uri = URI(sentinel)) && uri.password
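Note: since create now wraps literal strings itself, a callable can also be passed directly; a sketch assuming redis-client's callable-password support, which the wrap above relies on:

Sidekiq.configure_server do |config|
  config.redis = {
    url: "redis://localhost:6379/0",
    # called with the username at connect time; the value never reaches the startup log
    password: ->(username) { ENV.fetch("REDIS_PASSWORD") }
  }
end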
data/lib/sidekiq/testing.rb
CHANGED
@@ -87,7 +87,7 @@ module Sidekiq
       if Sidekiq::Testing.fake?
         payloads.each do |job|
           job = Sidekiq.load_json(Sidekiq.dump_json(job))
-          job["enqueued_at"] = Time.now.to_f unless job["at"]
+          job["enqueued_at"] = ::Process.clock_gettime(::Process::CLOCK_REALTIME, :millisecond) unless job["at"]
           Queues.push(job["queue"], job["class"], job)
         end
         true
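Note the unit change here: enqueued_at is now written as integer epoch milliseconds instead of a float of seconds. A sketch in fake testing mode (PingJob is illustrative):

require "sidekiq/testing"
Sidekiq::Testing.fake!

class PingJob
  include Sidekiq::Job
  def perform = nil
end

PingJob.perform_async
ms = PingJob.jobs.first["enqueued_at"] # integer epoch milliseconds
Time.at(ms / 1000.0).utc               # convert back to a Time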
@@ -329,6 +329,6 @@ module Sidekiq
   end
 end
 
-if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test? && !$TESTING
+if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test? && !$TESTING # rubocop:disable Style/GlobalVars
   warn("⛔️ WARNING: Sidekiq testing API enabled, but this is not the test environment. Your jobs will not go to Redis.", uplevel: 1)
 end