sidekiq 5.2.10 → 6.5.12

Files changed (126)
  1. checksums.yaml +4 -4
  2. data/Changes.md +422 -1
  3. data/LICENSE +3 -3
  4. data/README.md +24 -35
  5. data/bin/sidekiq +27 -3
  6. data/bin/sidekiqload +79 -67
  7. data/bin/sidekiqmon +8 -0
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +527 -310
  13. data/lib/sidekiq/cli.rb +204 -208
  14. data/lib/sidekiq/client.rb +78 -82
  15. data/lib/sidekiq/component.rb +65 -0
  16. data/lib/sidekiq/delay.rb +8 -7
  17. data/lib/sidekiq/extensions/action_mailer.rb +13 -22
  18. data/lib/sidekiq/extensions/active_record.rb +13 -10
  19. data/lib/sidekiq/extensions/class_methods.rb +14 -11
  20. data/lib/sidekiq/extensions/generic_proxy.rb +7 -5
  21. data/lib/sidekiq/fetch.rb +50 -40
  22. data/lib/sidekiq/job.rb +13 -0
  23. data/lib/sidekiq/job_logger.rb +33 -7
  24. data/lib/sidekiq/job_retry.rb +126 -106
  25. data/lib/sidekiq/job_util.rb +71 -0
  26. data/lib/sidekiq/launcher.rb +177 -83
  27. data/lib/sidekiq/logger.rb +156 -0
  28. data/lib/sidekiq/manager.rb +40 -41
  29. data/lib/sidekiq/metrics/deploy.rb +47 -0
  30. data/lib/sidekiq/metrics/query.rb +153 -0
  31. data/lib/sidekiq/metrics/shared.rb +94 -0
  32. data/lib/sidekiq/metrics/tracking.rb +134 -0
  33. data/lib/sidekiq/middleware/chain.rb +102 -46
  34. data/lib/sidekiq/middleware/current_attributes.rb +63 -0
  35. data/lib/sidekiq/middleware/i18n.rb +7 -7
  36. data/lib/sidekiq/middleware/modules.rb +21 -0
  37. data/lib/sidekiq/monitor.rb +133 -0
  38. data/lib/sidekiq/paginator.rb +28 -16
  39. data/lib/sidekiq/processor.rb +104 -97
  40. data/lib/sidekiq/rails.rb +46 -37
  41. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  42. data/lib/sidekiq/redis_connection.rb +108 -77
  43. data/lib/sidekiq/ring_buffer.rb +29 -0
  44. data/lib/sidekiq/scheduled.rb +105 -42
  45. data/lib/sidekiq/sd_notify.rb +149 -0
  46. data/lib/sidekiq/systemd.rb +24 -0
  47. data/lib/sidekiq/testing/inline.rb +6 -5
  48. data/lib/sidekiq/testing.rb +68 -58
  49. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  50. data/lib/sidekiq/version.rb +2 -1
  51. data/lib/sidekiq/web/action.rb +15 -11
  52. data/lib/sidekiq/web/application.rb +103 -77
  53. data/lib/sidekiq/web/csrf_protection.rb +180 -0
  54. data/lib/sidekiq/web/helpers.rb +125 -95
  55. data/lib/sidekiq/web/router.rb +23 -19
  56. data/lib/sidekiq/web.rb +65 -105
  57. data/lib/sidekiq/worker.rb +259 -109
  58. data/lib/sidekiq.rb +170 -62
  59. data/sidekiq.gemspec +23 -16
  60. data/web/assets/images/apple-touch-icon.png +0 -0
  61. data/web/assets/javascripts/application.js +113 -61
  62. data/web/assets/javascripts/chart.min.js +13 -0
  63. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  64. data/web/assets/javascripts/dashboard.js +53 -89
  65. data/web/assets/javascripts/graph.js +16 -0
  66. data/web/assets/javascripts/metrics.js +262 -0
  67. data/web/assets/stylesheets/application-dark.css +143 -0
  68. data/web/assets/stylesheets/application-rtl.css +0 -4
  69. data/web/assets/stylesheets/application.css +88 -233
  70. data/web/locales/ar.yml +8 -2
  71. data/web/locales/de.yml +14 -2
  72. data/web/locales/el.yml +43 -19
  73. data/web/locales/en.yml +13 -1
  74. data/web/locales/es.yml +18 -2
  75. data/web/locales/fr.yml +10 -3
  76. data/web/locales/ja.yml +14 -1
  77. data/web/locales/lt.yml +83 -0
  78. data/web/locales/pl.yml +4 -4
  79. data/web/locales/pt-br.yml +27 -9
  80. data/web/locales/ru.yml +4 -0
  81. data/web/locales/vi.yml +83 -0
  82. data/web/locales/zh-cn.yml +36 -11
  83. data/web/locales/zh-tw.yml +32 -7
  84. data/web/views/_footer.erb +1 -1
  85. data/web/views/_job_info.erb +3 -2
  86. data/web/views/_nav.erb +1 -1
  87. data/web/views/_poll_link.erb +2 -5
  88. data/web/views/_summary.erb +7 -7
  89. data/web/views/busy.erb +61 -22
  90. data/web/views/dashboard.erb +23 -14
  91. data/web/views/dead.erb +3 -3
  92. data/web/views/layout.erb +3 -1
  93. data/web/views/metrics.erb +69 -0
  94. data/web/views/metrics_for_job.erb +87 -0
  95. data/web/views/morgue.erb +9 -6
  96. data/web/views/queue.erb +23 -10
  97. data/web/views/queues.erb +10 -2
  98. data/web/views/retries.erb +11 -8
  99. data/web/views/retry.erb +3 -3
  100. data/web/views/scheduled.erb +5 -2
  101. metadata +58 -63
  102. data/.circleci/config.yml +0 -61
  103. data/.github/contributing.md +0 -32
  104. data/.github/issue_template.md +0 -11
  105. data/.gitignore +0 -15
  106. data/.travis.yml +0 -11
  107. data/3.0-Upgrade.md +0 -70
  108. data/4.0-Upgrade.md +0 -53
  109. data/5.0-Upgrade.md +0 -56
  110. data/COMM-LICENSE +0 -97
  111. data/Ent-Changes.md +0 -238
  112. data/Gemfile +0 -19
  113. data/Pro-2.0-Upgrade.md +0 -138
  114. data/Pro-3.0-Upgrade.md +0 -44
  115. data/Pro-4.0-Upgrade.md +0 -35
  116. data/Pro-Changes.md +0 -759
  117. data/Rakefile +0 -9
  118. data/bin/sidekiqctl +0 -20
  119. data/code_of_conduct.md +0 -50
  120. data/lib/generators/sidekiq/worker_generator.rb +0 -49
  121. data/lib/sidekiq/core_ext.rb +0 -1
  122. data/lib/sidekiq/ctl.rb +0 -221
  123. data/lib/sidekiq/exception_handler.rb +0 -29
  124. data/lib/sidekiq/logging.rb +0 -122
  125. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
  126. data/lib/sidekiq/util.rb +0 -66
data/lib/sidekiq/metrics/deploy.rb (new file)
@@ -0,0 +1,47 @@
+ require "sidekiq"
+ require "time"
+
+ # This file is designed to be required within the user's
+ # deployment script; it should need a bare minimum of dependencies.
+ #
+ #   require "sidekiq/metrics/deploy"
+ #   gitdesc = `git log -1 --format="%h %s"`.strip
+ #   d = Sidekiq::Metrics::Deploy.new
+ #   d.mark(label: gitdesc)
+ #
+ # Note that you cannot mark more than once per minute. This is a feature, not a bug.
+ module Sidekiq
+   module Metrics
+     class Deploy
+       MARK_TTL = 90 * 24 * 60 * 60 # 90 days
+
+       def initialize(pool = Sidekiq.redis_pool)
+         @pool = pool
+       end
+
+       def mark(at: Time.now, label: "")
+         # we need to round the timestamp so that we gracefully
+         # handle an expected, common error in marking deploys:
+         # having every process mark its deploy, leading
+         # to N marks for each deploy. Instead we round the time
+         # to the minute so that multiple marks within that minute
+         # will all naturally roll up into one mark per minute.
+         whence = at.utc
+         floor = Time.utc(whence.year, whence.month, whence.mday, whence.hour, whence.min, 0)
+         datecode = floor.strftime("%Y%m%d")
+         key = "#{datecode}-marks"
+         @pool.with do |c|
+           c.pipelined do |pipe|
+             pipe.hsetnx(key, floor.iso8601, label)
+             pipe.expire(key, MARK_TTL)
+           end
+         end
+       end
+
+       def fetch(date = Time.now.utc.to_date)
+         datecode = date.strftime("%Y%m%d")
+         @pool.with { |c| c.hgetall("#{datecode}-marks") }
+       end
+     end
+   end
+ end
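
The Deploy class above is deliberately dependency-light, so it can be driven from any deploy script that can reach the same Redis instance Sidekiq uses. A minimal sketch (the REDIS_URL/DEPLOY_SHA environment variables and the client configuration are illustrative assumptions, not part of the diff):

require "sidekiq"
require "sidekiq/metrics/deploy"

# Point the client at the same Redis your Sidekiq processes use.
Sidekiq.configure_client do |config|
  config.redis = {url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0")}
end

deploy = Sidekiq::Metrics::Deploy.new
deploy.mark(label: ENV.fetch("DEPLOY_SHA", "manual deploy"))

# Marks live in a per-day hash keyed by the minute-rounded ISO8601 timestamp,
# so multiple processes marking the same deploy collapse into one entry.
puts deploy.fetch(Time.now.utc.to_date).inspect
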
data/lib/sidekiq/metrics/query.rb (new file)
@@ -0,0 +1,153 @@
+ require "sidekiq"
+ require "date"
+ require "set"
+
+ require "sidekiq/metrics/shared"
+
+ module Sidekiq
+   module Metrics
+     # Allows caller to query for Sidekiq execution metrics within Redis.
+     # Caller sets a set of attributes to act as filters. {#fetch} will call
+     # Redis and return a Hash of results.
+     #
+     # NB: all metrics and times/dates are UTC only. We specifically do not
+     # support timezones.
+     class Query
+       def initialize(pool: Sidekiq.redis_pool, now: Time.now)
+         @time = now.utc
+         @pool = pool
+         @klass = nil
+       end
+
+       # Get metric data for all jobs from the last hour
+       def top_jobs(minutes: 60)
+         result = Result.new
+
+         time = @time
+         redis_results = @pool.with do |conn|
+           conn.pipelined do |pipe|
+             minutes.times do |idx|
+               key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+               pipe.hgetall key
+               result.prepend_bucket time
+               time -= 60
+             end
+           end
+         end
+
+         time = @time
+         redis_results.each do |hash|
+           hash.each do |k, v|
+             kls, metric = k.split("|")
+             result.job_results[kls].add_metric metric, time, v.to_i
+           end
+           time -= 60
+         end
+
+         result.marks = fetch_marks(result.starts_at..result.ends_at)
+
+         result
+       end
+
+       def for_job(klass, minutes: 60)
+         result = Result.new
+
+         time = @time
+         redis_results = @pool.with do |conn|
+           conn.pipelined do |pipe|
+             minutes.times do |idx|
+               key = "j|#{time.strftime("%Y%m%d")}|#{time.hour}:#{time.min}"
+               pipe.hmget key, "#{klass}|ms", "#{klass}|p", "#{klass}|f"
+               result.prepend_bucket time
+               time -= 60
+             end
+           end
+         end
+
+         time = @time
+         @pool.with do |conn|
+           redis_results.each do |(ms, p, f)|
+             result.job_results[klass].add_metric "ms", time, ms.to_i if ms
+             result.job_results[klass].add_metric "p", time, p.to_i if p
+             result.job_results[klass].add_metric "f", time, f.to_i if f
+             result.job_results[klass].add_hist time, Histogram.new(klass).fetch(conn, time)
+             time -= 60
+           end
+         end
+
+         result.marks = fetch_marks(result.starts_at..result.ends_at)
+
+         result
+       end
+
+       class Result < Struct.new(:starts_at, :ends_at, :size, :buckets, :job_results, :marks)
+         def initialize
+           super
+           self.buckets = []
+           self.marks = []
+           self.job_results = Hash.new { |h, k| h[k] = JobResult.new }
+         end
+
+         def prepend_bucket(time)
+           buckets.unshift time.strftime("%H:%M")
+           self.ends_at ||= time
+           self.starts_at = time
+         end
+       end
+
+       class JobResult < Struct.new(:series, :hist, :totals)
+         def initialize
+           super
+           self.series = Hash.new { |h, k| h[k] = Hash.new(0) }
+           self.hist = Hash.new { |h, k| h[k] = [] }
+           self.totals = Hash.new(0)
+         end
+
+         def add_metric(metric, time, value)
+           totals[metric] += value
+           series[metric][time.strftime("%H:%M")] += value
+
+           # Include timing measurements in seconds for convenience
+           add_metric("s", time, value / 1000.0) if metric == "ms"
+         end
+
+         def add_hist(time, hist_result)
+           hist[time.strftime("%H:%M")] = hist_result
+         end
+
+         def total_avg(metric = "ms")
+           completed = totals["p"] - totals["f"]
+           totals[metric].to_f / completed
+         end
+
+         def series_avg(metric = "ms")
+           series[metric].each_with_object(Hash.new(0)) do |(bucket, value), result|
+             completed = series.dig("p", bucket) - series.dig("f", bucket)
+             result[bucket] = (completed == 0) ? 0 : value.to_f / completed
+           end
+         end
+       end
+
+       class MarkResult < Struct.new(:time, :label)
+         def bucket
+           time.strftime("%H:%M")
+         end
+       end
+
+       private
+
+       def fetch_marks(time_range)
+         [].tap do |result|
+           marks = @pool.with { |c| c.hgetall("#{@time.strftime("%Y%m%d")}-marks") }
+
+           marks.each do |timestamp, label|
+             time = Time.parse(timestamp)
+             if time_range.cover? time
+               result << MarkResult.new(time, label)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
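
For orientation, here is a rough console sketch of how the Query API above might be driven. It assumes metrics are already being recorded (see the tracking middleware later in this diff), and "SomeJob" is a placeholder class name:

require "sidekiq/metrics/query"

q = Sidekiq::Metrics::Query.new
hour = q.top_jobs(minutes: 60)   # one bucket per minute, oldest first
hour.job_results.each do |klass, jr|
  puts "#{klass}: processed=#{jr.totals["p"]} failed=#{jr.totals["f"]} avg=#{jr.total_avg("ms")}ms"
end

# Drill into a single class, including its per-minute latency histogram.
detail = q.for_job("SomeJob", minutes: 30)
puts detail.job_results["SomeJob"].series_avg("ms").inspect
puts detail.marks.map { |m| [m.bucket, m.label] }.inspect   # deploy marks inside the window
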
data/lib/sidekiq/metrics/shared.rb (new file)
@@ -0,0 +1,94 @@
+ require "concurrent"
+
+ module Sidekiq
+   module Metrics
+     # TODO Support apps without concurrent-ruby
+     Counter = ::Concurrent::AtomicFixnum
+
+     # Implements space-efficient but statistically useful histogram storage.
+     # A precise time histogram stores every time. Instead we break times into a set of
+     # known buckets and increment counts of the associated time bucket. Even if we call
+     # the histogram a million times, we'll still only store 26 buckets.
+     # NB: needs to be thread-safe or resilient to races.
+     #
+     # To store this data, we use Redis' BITFIELD command to store unsigned 16-bit counters
+     # per bucket per klass per minute. It's unlikely that most people will be executing more
+     # than 1000 jobs/sec for a full minute of a specific type.
+     class Histogram
+       include Enumerable
+
+       # This number represents the maximum milliseconds for this bucket.
+       # 20 means all job executions up to 20ms, e.g. if a job takes
+       # 280ms, it'll increment bucket[7]. Note we can track job executions
+       # up to about 5.5 minutes. After that, it's assumed you're probably
+       # not too concerned with its performance.
+       BUCKET_INTERVALS = [
+         20, 30, 45, 65, 100,
+         150, 225, 335, 500, 750,
+         1100, 1700, 2500, 3800, 5750,
+         8500, 13000, 20000, 30000, 45000,
+         65000, 100000, 150000, 225000, 335000,
+         Float::INFINITY # the "maybe your job is too long" bucket
+       ]
+       LABELS = [
+         "20ms", "30ms", "45ms", "65ms", "100ms",
+         "150ms", "225ms", "335ms", "500ms", "750ms",
+         "1.1s", "1.7s", "2.5s", "3.8s", "5.75s",
+         "8.5s", "13s", "20s", "30s", "45s",
+         "65s", "100s", "150s", "225s", "335s",
+         "Slow"
+       ]
+
+       FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \
+         GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \
+         GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \
+         GET u16 #12 GET u16 #13 GET u16 #14 GET u16 #15 \
+         GET u16 #16 GET u16 #17 GET u16 #18 GET u16 #19 \
+         GET u16 #20 GET u16 #21 GET u16 #22 GET u16 #23 \
+         GET u16 #24 GET u16 #25".split
+
+       def each
+         buckets.each { |counter| yield counter.value }
+       end
+
+       def label(idx)
+         LABELS[idx]
+       end
+
+       attr_reader :buckets
+       def initialize(klass)
+         @klass = klass
+         @buckets = Array.new(BUCKET_INTERVALS.size) { Counter.new }
+       end
+
+       def record_time(ms)
+         index_to_use = BUCKET_INTERVALS.each_index do |idx|
+           break idx if ms < BUCKET_INTERVALS[idx]
+         end
+
+         @buckets[index_to_use].increment
+       end
+
+       def fetch(conn, now = Time.now)
+         window = now.utc.strftime("%d-%H:%-M")
+         key = "#{@klass}-#{window}"
+         conn.bitfield(key, *FETCH)
+       end
+
+       def persist(conn, now = Time.now)
+         buckets, @buckets = @buckets, []
+         window = now.utc.strftime("%d-%H:%-M")
+         key = "#{@klass}-#{window}"
+         cmd = [key, "OVERFLOW", "SAT"]
+         buckets.each_with_index do |counter, idx|
+           val = counter.value
+           cmd << "INCRBY" << "u16" << "##{idx}" << val.to_s if val > 0
+         end
+
+         conn.bitfield(*cmd) if cmd.size > 3
+         conn.expire(key, 86400)
+         key
+       end
+     end
+   end
+ end
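
The Histogram above can be exercised on its own; only fetch/persist need a Redis connection. A small sketch ("SomeJob" is a placeholder, and the concurrent-ruby gem must be installed since shared.rb requires it):

require "sidekiq/metrics/shared"

h = Sidekiq::Metrics::Histogram.new("SomeJob")
h.record_time(12)        # <= 20ms, so the first bucket
h.record_time(280)       # falls into the 335ms bucket (index 7), as the comment above notes
h.record_time(400_000)   # slower than 335s, so it lands in the final "Slow" bucket

h.each_with_index do |count, idx|
  puts "#{h.label(idx)}: #{count}" if count > 0
end

# Persisting writes the counters with BITFIELD under a per-minute key that
# expires after a day; it needs a real connection:
#   Sidekiq.redis { |conn| h.persist(conn) }
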
data/lib/sidekiq/metrics/tracking.rb (new file)
@@ -0,0 +1,134 @@
+ require "time"
+ require "sidekiq"
+ require "sidekiq/metrics/shared"
+
+ # This file contains the components which track execution metrics within Sidekiq.
+ module Sidekiq
+   module Metrics
+     class ExecutionTracker
+       include Sidekiq::Component
+
+       def initialize(config)
+         @config = config
+         @jobs = Hash.new(0)
+         @totals = Hash.new(0)
+         @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
+         @lock = Mutex.new
+       end
+
+       def track(queue, klass)
+         start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+         time_ms = 0
+         begin
+           begin
+             yield
+           ensure
+             finish = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :millisecond)
+             time_ms = finish - start
+           end
+           # We don't track time for failed jobs as they can have very unpredictable
+           # execution times. It's more important to know average time for successful jobs so we
+           # can better recognize when a perf regression is introduced.
+           @lock.synchronize {
+             @grams[klass].record_time(time_ms)
+             @jobs["#{klass}|ms"] += time_ms
+             @totals["ms"] += time_ms
+           }
+         rescue Exception
+           @lock.synchronize {
+             @jobs["#{klass}|f"] += 1
+             @totals["f"] += 1
+           }
+           raise
+         ensure
+           @lock.synchronize {
+             @jobs["#{klass}|p"] += 1
+             @totals["p"] += 1
+           }
+         end
+       end
+
+       LONG_TERM = 90 * 24 * 60 * 60
+       MID_TERM = 7 * 24 * 60 * 60
+       SHORT_TERM = 8 * 60 * 60
+
+       def flush(time = Time.now)
+         totals, jobs, grams = reset
+         procd = totals["p"]
+         fails = totals["f"]
+         return if procd == 0 && fails == 0
+
+         now = time.utc
+         nowdate = now.strftime("%Y%m%d")
+         nowhour = now.strftime("%Y%m%d|%-H")
+         nowmin = now.strftime("%Y%m%d|%-H:%-M")
+         count = 0
+
+         redis do |conn|
+           if grams.size > 0
+             conn.pipelined do |pipe|
+               grams.each do |_, gram|
+                 gram.persist(pipe, now)
+               end
+             end
+           end
+
+           [
+             ["j", jobs, nowdate, LONG_TERM],
+             ["j", jobs, nowhour, MID_TERM],
+             ["j", jobs, nowmin, SHORT_TERM]
+           ].each do |prefix, data, bucket, ttl|
+             # Quietly seed the new 7.0 stats format so migration is painless.
+             conn.pipelined do |xa|
+               stats = "#{prefix}|#{bucket}"
+               # logger.debug "Flushing metrics #{stats}"
+               data.each_pair do |key, value|
+                 xa.hincrby stats, key, value
+                 count += 1
+               end
+               xa.expire(stats, ttl)
+             end
+           end
+           logger.info "Flushed #{count} metrics"
+           count
+         end
+       end
+
+       private
+
+       def reset
+         @lock.synchronize {
+           array = [@totals, @jobs, @grams]
+           @totals = Hash.new(0)
+           @jobs = Hash.new(0)
+           @grams = Hash.new { |hash, key| hash[key] = Histogram.new(key) }
+           array
+         }
+       end
+     end
+
+     class Middleware
+       include Sidekiq::ServerMiddleware
+
+       def initialize(options)
+         @exec = options
+       end
+
+       def call(_instance, hash, queue, &block)
+         @exec.track(queue, hash["wrapped"] || hash["class"], &block)
+       end
+     end
+   end
+ end
+
+ if ENV["SIDEKIQ_METRICS_BETA"] == "1"
+   Sidekiq.configure_server do |config|
+     exec = Sidekiq::Metrics::ExecutionTracker.new(config)
+     config.server_middleware do |chain|
+       chain.add Sidekiq::Metrics::Middleware, exec
+     end
+     config.on(:beat) do
+       exec.flush
+     end
+   end
+ end
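
As the guard at the bottom of tracking.rb shows, collection is an opt-in beta in 6.5: start the Sidekiq process with SIDEKIQ_METRICS_BETA=1 and the middleware plus heartbeat flush are wired up automatically. The sketch below just restates that wiring by hand, mirroring the code in the diff:

require "sidekiq"
require "sidekiq/metrics/tracking"

Sidekiq.configure_server do |config|
  exec = Sidekiq::Metrics::ExecutionTracker.new(config)
  config.server_middleware do |chain|
    # Wraps every job execution and counts processed/failed/elapsed ms per class.
    chain.add Sidekiq::Metrics::Middleware, exec
  end
  # Buffered counters are pushed to Redis on each heartbeat.
  config.on(:beat) { exec.flush }
end
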
data/lib/sidekiq/middleware/chain.rb
@@ -1,118 +1,163 @@
  # frozen_string_literal: true
+
+ require "sidekiq/middleware/modules"
+
  module Sidekiq
    # Middleware is code configured to run before/after
-   # a message is processed. It is patterned after Rack
+   # a job is processed. It is patterned after Rack
    # middleware. Middleware exists for the client side
    # (pushing jobs onto the queue) as well as the server
    # side (when jobs are actually processed).
    #
+   # Callers will register middleware Classes and Sidekiq will
+   # create new instances of the middleware for every job. This
+   # is important so that instance state is not shared accidentally
+   # between job executions.
+   #
    # To add middleware for the client:
    #
-   # Sidekiq.configure_client do |config|
-   #   config.client_middleware do |chain|
-   #     chain.add MyClientHook
+   #   Sidekiq.configure_client do |config|
+   #     config.client_middleware do |chain|
+   #       chain.add MyClientHook
+   #     end
    #   end
-   # end
    #
    # To modify middleware for the server, just call
    # with another block:
    #
-   # Sidekiq.configure_server do |config|
-   #   config.server_middleware do |chain|
-   #     chain.add MyServerHook
-   #     chain.remove ActiveRecord
+   #   Sidekiq.configure_server do |config|
+   #     config.server_middleware do |chain|
+   #       chain.add MyServerHook
+   #       chain.remove ActiveRecord
+   #     end
    #   end
-   # end
    #
    # To insert immediately preceding another entry:
    #
-   # Sidekiq.configure_client do |config|
-   #   config.client_middleware do |chain|
-   #     chain.insert_before ActiveRecord, MyClientHook
+   #   Sidekiq.configure_client do |config|
+   #     config.client_middleware do |chain|
+   #       chain.insert_before ActiveRecord, MyClientHook
+   #     end
    #   end
-   # end
    #
    # To insert immediately after another entry:
    #
-   # Sidekiq.configure_client do |config|
-   #   config.client_middleware do |chain|
-   #     chain.insert_after ActiveRecord, MyClientHook
+   #   Sidekiq.configure_client do |config|
+   #     config.client_middleware do |chain|
+   #       chain.insert_after ActiveRecord, MyClientHook
+   #     end
    #   end
-   # end
    #
    # This is an example of a minimal server middleware:
    #
-   # class MyServerHook
-   #   def call(worker_instance, msg, queue)
-   #     puts "Before work"
-   #     yield
-   #     puts "After work"
+   #   class MyServerHook
+   #     include Sidekiq::ServerMiddleware
+   #
+   #     def call(job_instance, msg, queue)
+   #       logger.info "Before job"
+   #       redis {|conn| conn.get("foo") } # do something in Redis
+   #       yield
+   #       logger.info "After job"
+   #     end
    #   end
-   # end
    #
    # This is an example of a minimal client middleware, note
    # the method must return the result or the job will not push
    # to Redis:
    #
-   # class MyClientHook
-   #   def call(worker_class, msg, queue, redis_pool)
-   #     puts "Before push"
-   #     result = yield
-   #     puts "After push"
-   #     result
+   #   class MyClientHook
+   #     include Sidekiq::ClientMiddleware
+   #
+   #     def call(job_class, msg, queue, redis_pool)
+   #       logger.info "Before push"
+   #       result = yield
+   #       logger.info "After push"
+   #       result
+   #     end
    #   end
-   # end
    #
    module Middleware
      class Chain
        include Enumerable
-       attr_reader :entries

+       # A unique instance of the middleware chain is created for
+       # each job executed in order to be thread-safe.
+       # @param copy [Sidekiq::Middleware::Chain] New instance of Chain
+       # @returns nil
        def initialize_copy(copy)
          copy.instance_variable_set(:@entries, entries.dup)
+         nil
        end

+       # Iterate through each middleware in the chain
        def each(&block)
          entries.each(&block)
        end

-       def initialize
-         @entries = []
+       # @api private
+       def initialize(config = nil) # :nodoc:
+         @config = config
+         @entries = nil
          yield self if block_given?
        end

+       def entries
+         @entries ||= []
+       end
+
+       # Remove all middleware matching the given Class
+       # @param klass [Class]
        def remove(klass)
          entries.delete_if { |entry| entry.klass == klass }
        end

+       # Add the given middleware to the end of the chain.
+       # Sidekiq will call `klass.new(*args)` to create a clean
+       # copy of your middleware for every job executed.
+       #
+       #   chain.add(Statsd::Metrics, { collector: "localhost:8125" })
+       #
+       # @param klass [Class] Your middleware class
+       # @param *args [Array<Object>] Set of arguments to pass to every instance of your middleware
        def add(klass, *args)
-         remove(klass) if exists?(klass)
-         entries << Entry.new(klass, *args)
+         remove(klass)
+         entries << Entry.new(@config, klass, *args)
        end

+       # Identical to {#add} except the middleware is added to the front of the chain.
        def prepend(klass, *args)
-         remove(klass) if exists?(klass)
-         entries.insert(0, Entry.new(klass, *args))
+         remove(klass)
+         entries.insert(0, Entry.new(@config, klass, *args))
        end

+       # Inserts +newklass+ before +oldklass+ in the chain.
+       # Useful if one middleware must run before another middleware.
        def insert_before(oldklass, newklass, *args)
          i = entries.index { |entry| entry.klass == newklass }
-         new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
+         new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
          i = entries.index { |entry| entry.klass == oldklass } || 0
          entries.insert(i, new_entry)
        end

+       # Inserts +newklass+ after +oldklass+ in the chain.
+       # Useful if one middleware must run after another middleware.
        def insert_after(oldklass, newklass, *args)
          i = entries.index { |entry| entry.klass == newklass }
-         new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
+         new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
          i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1
-         entries.insert(i+1, new_entry)
+         entries.insert(i + 1, new_entry)
        end

+       # @return [Boolean] if the given class is already in the chain
        def exists?(klass)
          any? { |entry| entry.klass == klass }
        end

+       # @return [Boolean] if the chain contains no middleware
+       def empty?
+         @entries.nil? || @entries.empty?
+       end
+
        def retrieve
          map(&:make_new)
        end
@@ -121,9 +166,13 @@ module Sidekiq
          entries.clear
        end

+       # Used by Sidekiq to execute the middleware at runtime
+       # @api private
        def invoke(*args)
-         chain = retrieve.dup
-         traverse_chain = lambda do
+         return yield if empty?
+
+         chain = retrieve
+         traverse_chain = proc do
            if chain.empty?
              yield
            else
@@ -134,16 +183,23 @@ module Sidekiq
        end
      end

+     private
+
+     # Represents each link in the middleware chain
+     # @api private
      class Entry
        attr_reader :klass

-       def initialize(klass, *args)
+       def initialize(config, klass, *args)
+         @config = config
          @klass = klass
-         @args  = args
+         @args = args
        end

        def make_new
-         @klass.new(*@args)
+         x = @klass.new(*@args)
+         x.config = @config if @config && x.respond_to?(:config=)
+         x
        end
      end
    end
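
The chain.rb changes above pair with the new sidekiq/middleware/modules helpers: a middleware class that includes Sidekiq::ServerMiddleware (or Sidekiq::ClientMiddleware) gains config=, logger, and redis, and Chain::Entry now injects the config into each fresh instance. A minimal sketch of a 6.5-style server middleware (the class name and log message are made up for illustration):

require "sidekiq"

class TimingMiddleware
  include Sidekiq::ServerMiddleware  # provides config=, logger, and redis helpers

  def call(_job_instance, job, queue)
    started = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
    yield
  ensure
    elapsed = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - started
    logger.info { "#{job["class"]} on #{queue} took #{(elapsed * 1000).round}ms" }
  end
end

Sidekiq.configure_server do |config|
  config.server_middleware do |chain|
    # add() now removes any existing entry of the same class before appending,
    # so re-adding during initialization no longer duplicates the middleware.
    chain.add TimingMiddleware
  end
end
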
  end