sidekiq 7.3.0 → 8.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. checksums.yaml +4 -4
  2. data/Changes.md +158 -0
  3. data/README.md +16 -13
  4. data/bin/sidekiqload +31 -22
  5. data/bin/webload +69 -0
  6. data/lib/active_job/queue_adapters/sidekiq_adapter.rb +120 -0
  7. data/lib/generators/sidekiq/job_generator.rb +2 -0
  8. data/lib/sidekiq/api.rb +184 -71
  9. data/lib/sidekiq/capsule.rb +11 -9
  10. data/lib/sidekiq/cli.rb +16 -20
  11. data/lib/sidekiq/client.rb +28 -11
  12. data/lib/sidekiq/component.rb +62 -2
  13. data/lib/sidekiq/config.rb +42 -18
  14. data/lib/sidekiq/deploy.rb +2 -0
  15. data/lib/sidekiq/embedded.rb +4 -1
  16. data/lib/sidekiq/iterable_job.rb +3 -0
  17. data/lib/sidekiq/job/interrupt_handler.rb +2 -0
  18. data/lib/sidekiq/job/iterable/active_record_enumerator.rb +3 -3
  19. data/lib/sidekiq/job/iterable.rb +82 -7
  20. data/lib/sidekiq/job_logger.rb +15 -27
  21. data/lib/sidekiq/job_retry.rb +17 -5
  22. data/lib/sidekiq/job_util.rb +7 -1
  23. data/lib/sidekiq/launcher.rb +3 -2
  24. data/lib/sidekiq/logger.rb +19 -70
  25. data/lib/sidekiq/manager.rb +0 -1
  26. data/lib/sidekiq/metrics/query.rb +73 -45
  27. data/lib/sidekiq/metrics/shared.rb +23 -9
  28. data/lib/sidekiq/metrics/tracking.rb +22 -12
  29. data/lib/sidekiq/middleware/current_attributes.rb +12 -4
  30. data/lib/sidekiq/middleware/modules.rb +2 -0
  31. data/lib/sidekiq/monitor.rb +2 -1
  32. data/lib/sidekiq/paginator.rb +14 -1
  33. data/lib/sidekiq/processor.rb +26 -19
  34. data/lib/sidekiq/profiler.rb +72 -0
  35. data/lib/sidekiq/rails.rb +44 -55
  36. data/lib/sidekiq/redis_client_adapter.rb +0 -1
  37. data/lib/sidekiq/redis_connection.rb +22 -4
  38. data/lib/sidekiq/ring_buffer.rb +2 -0
  39. data/lib/sidekiq/systemd.rb +2 -0
  40. data/lib/sidekiq/testing.rb +7 -7
  41. data/lib/sidekiq/version.rb +6 -2
  42. data/lib/sidekiq/web/action.rb +124 -69
  43. data/lib/sidekiq/web/application.rb +355 -377
  44. data/lib/sidekiq/web/config.rb +120 -0
  45. data/lib/sidekiq/web/helpers.rb +64 -33
  46. data/lib/sidekiq/web/router.rb +61 -74
  47. data/lib/sidekiq/web.rb +52 -150
  48. data/lib/sidekiq.rb +5 -4
  49. data/sidekiq.gemspec +6 -6
  50. data/web/assets/javascripts/application.js +6 -13
  51. data/web/assets/javascripts/base-charts.js +30 -16
  52. data/web/assets/javascripts/chartjs-adapter-date-fns.min.js +7 -0
  53. data/web/assets/javascripts/dashboard-charts.js +2 -0
  54. data/web/assets/javascripts/dashboard.js +7 -1
  55. data/web/assets/javascripts/metrics.js +16 -34
  56. data/web/assets/stylesheets/style.css +766 -0
  57. data/web/locales/ar.yml +1 -0
  58. data/web/locales/cs.yml +1 -0
  59. data/web/locales/da.yml +1 -0
  60. data/web/locales/de.yml +1 -0
  61. data/web/locales/el.yml +1 -0
  62. data/web/locales/en.yml +9 -1
  63. data/web/locales/es.yml +24 -2
  64. data/web/locales/fa.yml +1 -0
  65. data/web/locales/fr.yml +1 -1
  66. data/web/locales/gd.yml +1 -1
  67. data/web/locales/he.yml +1 -0
  68. data/web/locales/hi.yml +1 -0
  69. data/web/locales/it.yml +40 -1
  70. data/web/locales/ja.yml +1 -1
  71. data/web/locales/ko.yml +1 -0
  72. data/web/locales/lt.yml +1 -0
  73. data/web/locales/nb.yml +1 -0
  74. data/web/locales/nl.yml +1 -0
  75. data/web/locales/pl.yml +1 -0
  76. data/web/locales/{pt-br.yml → pt-BR.yml} +3 -3
  77. data/web/locales/pt.yml +1 -0
  78. data/web/locales/ru.yml +1 -0
  79. data/web/locales/sv.yml +1 -0
  80. data/web/locales/ta.yml +1 -0
  81. data/web/locales/tr.yml +2 -2
  82. data/web/locales/uk.yml +25 -1
  83. data/web/locales/ur.yml +1 -0
  84. data/web/locales/vi.yml +1 -0
  85. data/web/locales/{zh-cn.yml → zh-CN.yml} +85 -74
  86. data/web/locales/{zh-tw.yml → zh-TW.yml} +2 -2
  87. data/web/views/_footer.erb +31 -34
  88. data/web/views/_job_info.erb +91 -89
  89. data/web/views/_metrics_period_select.erb +13 -10
  90. data/web/views/_nav.erb +14 -21
  91. data/web/views/_paging.erb +23 -21
  92. data/web/views/_poll_link.erb +2 -2
  93. data/web/views/_summary.erb +16 -16
  94. data/web/views/busy.erb +124 -122
  95. data/web/views/dashboard.erb +63 -64
  96. data/web/views/dead.erb +31 -27
  97. data/web/views/filtering.erb +3 -4
  98. data/web/views/layout.erb +13 -29
  99. data/web/views/metrics.erb +75 -82
  100. data/web/views/metrics_for_job.erb +45 -46
  101. data/web/views/morgue.erb +61 -70
  102. data/web/views/profiles.erb +43 -0
  103. data/web/views/queue.erb +54 -52
  104. data/web/views/queues.erb +43 -41
  105. data/web/views/retries.erb +66 -75
  106. data/web/views/retry.erb +32 -27
  107. data/web/views/scheduled.erb +59 -55
  108. data/web/views/scheduled_job_info.erb +1 -1
  109. metadata +27 -29
  110. data/web/assets/stylesheets/application-dark.css +0 -147
  111. data/web/assets/stylesheets/application-rtl.css +0 -163
  112. data/web/assets/stylesheets/application.css +0 -758
  113. data/web/assets/stylesheets/bootstrap-rtl.min.css +0 -9
  114. data/web/assets/stylesheets/bootstrap.css +0 -5
  115. data/web/views/_status.erb +0 -4
data/lib/sidekiq/metrics/query.rb

@@ -1,7 +1,7 @@
-require "sidekiq"
-require "date"
-require "set"
+# frozen_string_literal: true

+require "date"
+require "sidekiq"
 require "sidekiq/metrics/shared"

 module Sidekiq
@@ -10,7 +10,7 @@ module Sidekiq
     # Caller sets a set of attributes to act as filters. {#fetch} will call
     # Redis and return a Hash of results.
     #
-    # NB: all metrics and times/dates are UTC only. We specifically do not
+    # NB: all metrics and times/dates are UTC only. We explicitly do not
     # support timezones.
     class Query
       def initialize(pool: nil, now: Time.now)
@@ -19,23 +19,46 @@ module Sidekiq
         @klass = nil
       end

+      ROLLUPS = {
+        # minutely aggregates per minute
+        minutely: [60, ->(time) { time.strftime("j|%y%m%d|%-H:%M") }],
+        # hourly aggregates every 10 minutes so we'll have six data points per hour
+        hourly: [600, ->(time) {
+          m = time.min
+          mins = (m < 10) ? "0" : m.to_s[0]
+          time.strftime("j|%y%m%d|%-H:#{mins}")
+        }]
+      }
+
       # Get metric data for all jobs from the last hour
       # +class_filter+: return only results for classes matching filter
-      def top_jobs(class_filter: nil, minutes: 60)
-        result = Result.new
-
+      # +minutes+: the number of fine-grained minute buckets to retrieve
+      # +hours+: the number of coarser-grained 10-minute buckets to retrieve, in hours
+      def top_jobs(class_filter: nil, minutes: nil, hours: nil)
         time = @time
+        minutes = 60 unless minutes || hours
+
+        # DoS protection, sanity check
+        minutes = 60 if minutes && minutes > 480
+        hours = 72 if hours && hours > 72
+
+        granularity = hours ? :hourly : :minutely
+        result = Result.new(granularity)
+        result.ends_at = time
+        count = hours ? hours * 6 : minutes
+        stride, keyproc = ROLLUPS[granularity]
+
         redis_results = @pool.with do |conn|
           conn.pipelined do |pipe|
-            minutes.times do |idx|
-              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+            count.times do |idx|
+              key = keyproc.call(time)
               pipe.hgetall key
-              result.prepend_bucket time
-              time -= 60
+              time -= stride
             end
           end
         end

+        result.starts_at = time
         time = @time
         redis_results.each do |hash|
           hash.each do |k, v|
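To make the new rollup scheme concrete, here is a quick sketch of the Redis keys those lambdas emit (the timestamp is arbitrary; each `ROLLUPS` value is the `[stride, keyproc]` pair destructured above):

```ruby
require "sidekiq/metrics/query"

t = Time.utc(2025, 2, 14, 8, 43)
# minutely: one bucket per minute
Sidekiq::Metrics::Query::ROLLUPS[:minutely].last.call(t) # => "j|250214|8:43"
# hourly: the minute is truncated to its tens digit, one bucket per 10 minutes
Sidekiq::Metrics::Query::ROLLUPS[:hourly].last.call(t)   # => "j|250214|8:4"
```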
@@ -43,63 +66,66 @@ module Sidekiq
             next if class_filter && !class_filter.match?(kls)
             result.job_results[kls].add_metric metric, time, v.to_i
           end
-          time -= 60
+          time -= stride
         end

-        result.marks = fetch_marks(result.starts_at..result.ends_at)
-
+        result.marks = fetch_marks(result.starts_at..result.ends_at, granularity)
         result
       end

-      def for_job(klass, minutes: 60)
-        result = Result.new
-
+      def for_job(klass, minutes: nil, hours: nil)
         time = @time
+        minutes = 60 unless minutes || hours
+
+        # DoS protection, sanity check
+        minutes = 60 if minutes && minutes > 480
+        hours = 72 if hours && hours > 72
+
+        granularity = hours ? :hourly : :minutely
+        result = Result.new(granularity)
+        result.ends_at = time
+        count = hours ? hours * 6 : minutes
+        stride, keyproc = ROLLUPS[granularity]
+
         redis_results = @pool.with do |conn|
           conn.pipelined do |pipe|
-            minutes.times do |idx|
-              key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+            count.times do |idx|
+              key = keyproc.call(time)
               pipe.hmget key, "#{klass}|ms", "#{klass}|p", "#{klass}|f"
-              result.prepend_bucket time
-              time -= 60
+              time -= stride
             end
           end
         end

+        result.starts_at = time
         time = @time
         @pool.with do |conn|
           redis_results.each do |(ms, p, f)|
             result.job_results[klass].add_metric "ms", time, ms.to_i if ms
             result.job_results[klass].add_metric "p", time, p.to_i if p
             result.job_results[klass].add_metric "f", time, f.to_i if f
-            result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time).reverse
-            time -= 60
+            result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time).reverse if minutes
+            time -= stride
           end
         end

-        result.marks = fetch_marks(result.starts_at..result.ends_at)
-
+        result.marks = fetch_marks(result.starts_at..result.ends_at, granularity)
         result
       end

-      class Result < Struct.new(:starts_at, :ends_at, :size, :buckets, :job_results, :marks)
-        def initialize
+      class Result < Struct.new(:granularity, :starts_at, :ends_at, :size, :job_results, :marks)
+        def initialize(granularity = :minutely)
           super
-          self.buckets = []
+          self.granularity = granularity
           self.marks = []
-          self.job_results = Hash.new { |h, k| h[k] = JobResult.new }
-        end
-
-        def prepend_bucket(time)
-          buckets.unshift time.strftime("%H:%M")
-          self.ends_at ||= time
-          self.starts_at = time
+          self.job_results = Hash.new { |h, k| h[k] = JobResult.new(granularity) }
         end
       end

-      class JobResult < Struct.new(:series, :hist, :totals)
-        def initialize
+      class JobResult < Struct.new(:granularity, :series, :hist, :totals)
+        def initialize(granularity = :minutely)
           super
+          self.granularity = granularity
           self.series = Hash.new { |h, k| h[k] = Hash.new(0) }
           self.hist = Hash.new { |h, k| h[k] = [] }
           self.totals = Hash.new(0)
@@ -107,14 +133,14 @@ module Sidekiq

         def add_metric(metric, time, value)
           totals[metric] += value
-          series[metric][time.strftime("%H:%M")] += value
+          series[metric][Query.bkt_time_s(time, granularity)] += value

           # Include timing measurements in seconds for convenience
           add_metric("s", time, value / 1000.0) if metric == "ms"
         end

         def add_hist(time, hist_result)
-          hist[time.strftime("%H:%M")] = hist_result
+          hist[Query.bkt_time_s(time, granularity)] = hist_result
         end

         def total_avg(metric = "ms")
@@ -131,22 +157,24 @@ module Sidekiq
         end
       end

-      class MarkResult < Struct.new(:time, :label)
-        def bucket
-          time.strftime("%H:%M")
-        end
+      MarkResult = Struct.new(:time, :label, :bucket)
+
+      def self.bkt_time_s(time, granularity)
+        # truncate time to ten minutes ("8:40", not "8:43") or one minute
+        truncation = (granularity == :hourly) ? 600 : 60
+        Time.at(time.to_i - time.to_i % truncation).utc.iso8601
       end

       private

-      def fetch_marks(time_range)
+      def fetch_marks(time_range, granularity)
         [].tap do |result|
           marks = @pool.with { |c| c.hgetall("#{@time.strftime("%Y%m%d")}-marks") }

           marks.each do |timestamp, label|
             time = Time.parse(timestamp)
             if time_range.cover? time
-              result << MarkResult.new(time, label)
+              result << MarkResult.new(time, label, Query.bkt_time_s(time, granularity))
             end
           end
         end
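A worked example of `bkt_time_s`, which replaces the old `"%H:%M"` bucket labels with full ISO-8601 timestamps truncated to the bucket width:

```ruby
require "time"
require "sidekiq/metrics/query"

t = Time.utc(2025, 2, 14, 8, 43, 17)
Sidekiq::Metrics::Query.bkt_time_s(t, :minutely) # => "2025-02-14T08:43:00Z"
Sidekiq::Metrics::Query.bkt_time_s(t, :hourly)   # => "2025-02-14T08:40:00Z"
```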
data/lib/sidekiq/metrics/shared.rb

@@ -1,10 +1,21 @@
-require "concurrent"
+# frozen_string_literal: true

 module Sidekiq
   module Metrics
-    # This is the only dependency on concurrent-ruby in Sidekiq but it's
-    # mandatory for thread-safety until MRI supports atomic operations on values.
-    Counter = ::Concurrent::AtomicFixnum
+    class Counter
+      def initialize
+        @value = 0
+        @lock = Mutex.new
+      end
+
+      def increment
+        @lock.synchronize { @value += 1 }
+      end
+
+      def value
+        @lock.synchronize { @value }
+      end
+    end

     # Implements space-efficient but statistically useful histogram storage.
     # A precise time histogram stores every time. Instead we break times into a set of
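A minimal usage sketch of the Mutex-backed replacement; it keeps concurrent increments safe without the concurrent-ruby dependency:

```ruby
require "sidekiq/metrics/shared"

counter = Sidekiq::Metrics::Counter.new
threads = 8.times.map { Thread.new { 1_000.times { counter.increment } } }
threads.each(&:join)
counter.value # => 8000
```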
@@ -14,7 +25,10 @@ module Sidekiq
     #
     # To store this data, we use Redis' BITFIELD command to store unsigned 16-bit counters
     # per bucket per klass per minute. It's unlikely that most people will be executing more
-    # than 1000 job/sec for a full minute of a specific type.
+    # than 1000 job/sec for a full minute of a specific type (i.e. overflow 65,536).
+    #
+    # Histograms are only stored at the fine-grained level, they are not rolled up
+    # for longer-term buckets.
     class Histogram
       include Enumerable
@@ -71,15 +85,15 @@ module Sidekiq
       end

       def fetch(conn, now = Time.now)
-        window = now.utc.strftime("%d-%H:%-M")
-        key = "#{@klass}-#{window}"
+        window = now.utc.strftime("%-d-%-H:%-M")
+        key = "h|#{@klass}-#{window}"
         conn.bitfield_ro(key, *FETCH)
       end

       def persist(conn, now = Time.now)
         buckets, @buckets = @buckets, []
-        window = now.utc.strftime("%d-%H:%-M")
-        key = "#{@klass}-#{window}"
+        window = now.utc.strftime("%-d-%-H:%-M")
+        key = "h|#{@klass}-#{window}"
         cmd = [key, "OVERFLOW", "SAT"]
         buckets.each_with_index do |counter, idx|
           val = counter.value
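The `h|` prefix namespaces histogram keys in Redis, and switching to `%-d`/`%-H` drops the zero padding on day and hour. Key shapes for a hypothetical `MyJob` bucket on Feb 4 at 08:05 UTC:

```ruby
Time.utc(2025, 2, 4, 8, 5).strftime("%-d-%-H:%-M") # => "4-8:5"
# 7.3 ("%d-%H:%-M", no prefix): "MyJob-04-08:5"
# 8.0:                          "h|MyJob-4-8:5"
```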
data/lib/sidekiq/metrics/tracking.rb

@@ -19,23 +19,23 @@ module Sidekiq
       end

       def track(queue, klass)
-        start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+        start = mono_ms
         time_ms = 0
         begin
           begin
             yield
           ensure
-            finish = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+            finish = mono_ms
             time_ms = finish - start
           end
           # We don't track time for failed jobs as they can have very unpredictable
           # execution times. more important to know average time for successful jobs so we
           # can better recognize when a perf regression is introduced.
-          @lock.synchronize {
-            @grams[klass].record_time(time_ms)
-            @jobs["#{klass}|ms"] += time_ms
-            @totals["ms"] += time_ms
-          }
+          track_time(klass, time_ms)
+        rescue JobRetry::Skip
+          # This is raised when iterable job is interrupted.
+          track_time(klass, time_ms)
+          raise
         rescue Exception
           @lock.synchronize {
             @jobs["#{klass}|f"] += 1
@@ -51,7 +51,7 @@ module Sidekiq
      end

      # LONG_TERM = 90 * 24 * 60 * 60
-      # MID_TERM = 7 * 24 * 60 * 60
+      MID_TERM = 3 * 24 * 60 * 60
      SHORT_TERM = 8 * 60 * 60

      def flush(time = Time.now)
@@ -62,8 +62,10 @@ module Sidekiq

        now = time.utc
        # nowdate = now.strftime("%Y%m%d")
-        # nowhour = now.strftime("%Y%m%d|%-H")
-        nowmin = now.strftime("%Y%m%d|%-H:%-M")
+        # "250214|8:4" is the 10 minute bucket for Feb 14 2025, 08:43
+        nowmid = now.strftime("%y%m%d|%-H:%M")[0..-2]
+        # "250214|8:43" is the 1 minute bucket for Feb 14 2025, 08:43
+        nowshort = now.strftime("%y%m%d|%-H:%M")
        count = 0

        redis do |conn|
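The comments spell out the bucket shapes; a quick check of the two strftime expressions for Feb 14 2025, 08:43 UTC:

```ruby
now = Time.utc(2025, 2, 14, 8, 43)
nowshort = now.strftime("%y%m%d|%-H:%M") # => "250214|8:43" (1-minute bucket)
nowmid = nowshort[0..-2]                 # => "250214|8:4"  (10-minute bucket)
```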
@@ -81,8 +83,8 @@ module Sidekiq
          # daily or hourly rollups.
          [
            # ["j", jobs, nowdate, LONG_TERM],
-            # ["j", jobs, nowhour, MID_TERM],
-            ["j", jobs, nowmin, SHORT_TERM]
+            ["j", jobs, nowmid, MID_TERM],
+            ["j", jobs, nowshort, SHORT_TERM]
          ].each do |prefix, data, bucket, ttl|
            conn.pipelined do |xa|
              stats = "#{prefix}|#{bucket}"
@@ -100,6 +102,14 @@ module Sidekiq

      private

+      def track_time(klass, time_ms)
+        @lock.synchronize {
+          @grams[klass].record_time(time_ms)
+          @jobs["#{klass}|ms"] += time_ms
+          @totals["ms"] += time_ms
+        }
+      end
+
      def reset
        @lock.synchronize {
          array = [@totals, @jobs, @grams]
data/lib/sidekiq/middleware/current_attributes.rb

@@ -1,3 +1,6 @@
+# frozen_string_literal: true
+
+require "active_job/arguments"
 require "active_support/current_attributes"

 module Sidekiq
@@ -18,6 +21,8 @@ module Sidekiq
   #   Sidekiq::CurrentAttributes.persist(["Myapp::Current", "Myapp::OtherCurrent"])
   #
   module CurrentAttributes
+    Serializer = ::ActiveJob::Arguments
+
     class Save
       include Sidekiq::ClientMiddleware

@@ -31,7 +36,7 @@ module Sidekiq
            attrs = strklass.constantize.attributes
            # Retries can push the job N times, we don't
            # want retries to reset cattr. #5692, #5090
-            job[key] = attrs if attrs.any?
+            job[key] = Serializer.serialize(attrs) if attrs.any?
          end
        end
        yield
@@ -51,7 +56,7 @@ module Sidekiq
        @cattrs.each do |(key, strklass)|
          next unless job.has_key?(key)

-          klass_attrs[strklass.constantize] = job[key]
+          klass_attrs[strklass.constantize] = Serializer.deserialize(job[key]).to_h
        end

        wrap(klass_attrs.to_a, &block)
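Routing attributes through `ActiveJob::Arguments` lets values that raw JSON would mangle (Symbols, Dates, GlobalID-capable records) survive the round trip. A sketch of the round trip the middleware now performs, with a hypothetical attributes hash:

```ruby
require "date"
require "active_job"
require "active_job/arguments"

attrs = {"locale" => :en, "due_on" => Date.new(2025, 2, 14)}
payload = ActiveJob::Arguments.serialize(attrs)         # JSON-safe structure
ActiveJob::Arguments.deserialize(payload).to_h == attrs # => true
```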
@@ -66,11 +71,14 @@ module Sidekiq
        retried = false

        begin
+          set_succeeded = false
          klass.set(attrs) do
+            set_succeeded = true
            wrap(klass_attrs, &block)
          end
        rescue NoMethodError
-          raise if retried
+          # Don't retry if the no method error didn't come from current attributes
+          raise if retried || set_succeeded

          # It is possible that the `CurrentAttributes` definition
          # was changed before the job started processing.
@@ -86,7 +94,7 @@ module Sidekiq
        cattrs = build_cattrs_hash(klass_or_array)

        config.client_middleware.add Save, cattrs
-        config.server_middleware.add Load, cattrs
+        config.server_middleware.prepend Load, cattrs
      end

      private
data/lib/sidekiq/middleware/modules.rb

@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module Sidekiq
   # Server-side middleware must import this Module in order
   # to get access to server resources during `call`.
data/lib/sidekiq/monitor.rb

@@ -1,4 +1,5 @@
 #!/usr/bin/env ruby
+# frozen_string_literal: true

 require "fileutils"
 require "sidekiq/api"
@@ -98,7 +99,7 @@ class Sidekiq::Monitor
      pad = opts[:pad] || 0
      max_length = opts[:max_length] || (80 - pad)
      out = []
-      line = ""
+      line = +""
      values.each do |value|
        if (line.length + value.length) > max_length
          out << line
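Both monitor.rb changes are about frozen string literals: with the magic comment in effect a bare `""` is frozen, so appending to it would raise `FrozenError`; the unary plus returns a mutable copy:

```ruby
# frozen_string_literal: true

line = ""    # frozen under the magic comment
line.frozen? # => true

line = +""   # unary plus yields an unfrozen copy
line << "ok" # safe to append
```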
data/lib/sidekiq/paginator.rb

@@ -2,6 +2,12 @@

 module Sidekiq
   module Paginator
+    TYPE_CACHE = {
+      "dead" => "zset",
+      "retry" => "zset",
+      "schedule" => "zset"
+    }
+
    def page(key, pageidx = 1, page_size = 25, opts = nil)
      current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i
      pageidx = current_page - 1
@@ -11,7 +17,14 @@ module Sidekiq
      ending = starting + page_size - 1

      Sidekiq.redis do |conn|
-        type = conn.type(key)
+        # horrible, think you can make this cleaner?
+        type = TYPE_CACHE[key]
+        if type
+        elsif key.start_with?("queue:")
+          type = TYPE_CACHE[key] = "list"
+        else
+          type = TYPE_CACHE[key] = conn.type(key)
+        end
        rev = opts && opts[:reverse]

        case type
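Answering the comment's own question: one tidier equivalent (a hypothetical refactor, not what ships in 8.0.5) collapses the branches into a single memoized expression with the same behavior:

```ruby
# Consult the cache, treat queue:* keys as lists without a network call,
# and fall back to a single Redis TYPE call for anything else.
type = TYPE_CACHE[key] ||= key.start_with?("queue:") ? "list" : conn.type(key)
```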
data/lib/sidekiq/processor.rb

@@ -3,6 +3,7 @@
 require "sidekiq/fetch"
 require "sidekiq/job_logger"
 require "sidekiq/job_retry"
+require "sidekiq/profiler"

 module Sidekiq
   ##
@@ -66,7 +67,7 @@ module Sidekiq
      @thread ||= safe_thread("#{config.name}/processor", &method(:run))
    end

-    private unless $TESTING
+    private

    def run
      # By setting this thread-local, Sidekiq.redis will access +Sidekiq::Capsule#redis_pool+
@@ -112,13 +113,17 @@ module Sidekiq
    def handle_fetch_exception(ex)
      unless @down
        @down = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-        logger.error("Error fetching job: #{ex}")
        handle_exception(ex)
      end
      sleep(1)
      nil
    end

+    def profile(job, &block)
+      return yield unless job["profile"]
+      Sidekiq::Profiler.new(config).call(job, &block)
+    end
+
    def dispatch(job_hash, queue, jobstr)
      # since middleware can mutate the job hash
      # we need to clone it to report the original
@@ -132,17 +137,19 @@ module Sidekiq
      @retrier.global(jobstr, queue) do
        @job_logger.call(job_hash, queue) do
          stats(jobstr, queue) do
-            # Rails 5 requires a Reloader to wrap code execution. In order to
-            # constantize the worker and instantiate an instance, we have to call
-            # the Reloader. It handles code loading, db connection management, etc.
-            # Effectively this block denotes a "unit of work" to Rails.
-            @reloader.call do
-              klass = Object.const_get(job_hash["class"])
-              inst = klass.new
-              inst.jid = job_hash["jid"]
-              inst._context = self
-              @retrier.local(inst, jobstr, queue) do
-                yield inst
+            profile(job_hash) do
+              # Rails 5 requires a Reloader to wrap code execution. In order to
+              # constantize the worker and instantiate an instance, we have to call
+              # the Reloader. It handles code loading, db connection management, etc.
+              # Effectively this block denotes a "unit of work" to Rails.
+              @reloader.call do
+                klass = Object.const_get(job_hash["class"])
+                instance = klass.new
+                instance.jid = job_hash["jid"]
+                instance._context = self
+                @retrier.local(instance, jobstr, queue) do
+                  yield instance
+                end
              end
            end
          end
@@ -165,7 +172,6 @@ module Sidekiq
      begin
        job_hash = Sidekiq.load_json(jobstr)
      rescue => ex
-        handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
        now = Time.now.to_f
        redis do |conn|
          conn.multi do |xa|
@@ -174,15 +180,16 @@ module Sidekiq
            xa.zremrangebyrank("dead", 0, -@capsule.config[:dead_max_jobs])
          end
        end
+        handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
        return uow.acknowledge
      end

      ack = false
      Thread.handle_interrupt(IGNORE_SHUTDOWN_INTERRUPTS) do
        Thread.handle_interrupt(ALLOW_SHUTDOWN_INTERRUPTS) do
-          dispatch(job_hash, queue, jobstr) do |inst|
-            config.server_middleware.invoke(inst, job_hash, queue) do
-              execute_job(inst, job_hash["args"])
+          dispatch(job_hash, queue, jobstr) do |instance|
+            config.server_middleware.invoke(instance, job_hash, queue) do
+              execute_job(instance, job_hash["args"])
            end
          end
          ack = true
@@ -216,8 +223,8 @@ module Sidekiq
      end
    end

-    def execute_job(inst, cloned_args)
-      inst.perform(*cloned_args)
+    def execute_job(instance, cloned_args)
+      instance.perform(*cloned_args)
    end

    # Ruby doesn't provide atomic counters out of the box so we'll
data/lib/sidekiq/profiler.rb (new file)

@@ -0,0 +1,72 @@
+require "fileutils"
+require "sidekiq/component"
+
+module Sidekiq
+  # Allows the user to profile jobs running in production.
+  # See details in the Profiling wiki page.
+  class Profiler
+    EXPIRY = 86400 # 1 day
+    DEFAULT_OPTIONS = {
+      mode: :wall
+    }
+
+    include Sidekiq::Component
+    def initialize(config)
+      @config = config
+      @vernier_output_dir = ENV.fetch("VERNIER_OUTPUT_DIR") { Dir.tmpdir }
+    end
+
+    def call(job, &block)
+      return yield unless job["profile"]
+
+      token = job["profile"]
+      type = job["class"]
+      jid = job["jid"]
+      started_at = Time.now
+
+      rundata = {
+        started_at: started_at.to_i,
+        token: token,
+        type: type,
+        jid: jid,
+        # .gz extension tells Vernier to compress the data
+        filename: File.join(
+          @vernier_output_dir,
+          "#{token}-#{type}-#{jid}-#{started_at.strftime("%Y%m%d-%H%M%S")}.json.gz"
+        )
+      }
+      profiler_options = profiler_options(job, rundata)
+
+      require "vernier"
+      begin
+        a = Time.now
+        rc = Vernier.profile(**profiler_options, &block)
+        b = Time.now
+
+        # Failed jobs will raise an exception on previous line and skip this
+        # block. Only successful jobs will persist profile data to Redis.
+        key = "#{token}-#{jid}"
+        data = File.read(rundata[:filename])
+        redis do |conn|
+          conn.multi do |m|
+            m.zadd("profiles", Time.now.to_f + EXPIRY, key)
+            m.hset(key, rundata.merge(elapsed: (b - a), data: data, size: data.bytesize))
+            m.expire(key, EXPIRY)
+          end
+        end
+        rc
+      ensure
+        FileUtils.rm_f(rundata[:filename])
+      end
+    end
+
+    private
+
+    def profiler_options(job, rundata)
+      profiler_options = (job["profiler_options"] || {}).transform_keys(&:to_sym)
+      profiler_options[:mode] = profiler_options[:mode].to_sym if profiler_options[:mode]
+
+      DEFAULT_OPTIONS.merge(profiler_options, {out: rundata[:filename]})
+    end
+  end
+end
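Since `set` merges arbitrary keys into the job payload, a single run can opt into profiling: `Processor#profile` checks `job["profile"]` and routes execution through `Sidekiq::Profiler`. The token and options below are illustrative (`:wall` is the default mode; see the Profiling wiki page for what Vernier supports):

```ruby
# Enqueue one profiled run of a hypothetical MyJob; on success the Vernier
# profile is kept in Redis under "#{token}-#{jid}" for EXPIRY (1 day).
MyJob.set("profile" => "debug-4711", "profiler_options" => {"mode" => "wall"})
  .perform_async(123)
```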