sidekiq 6.5.8 → 7.0.3

Files changed (102)
  1. checksums.yaml +4 -4
  2. data/Changes.md +51 -6
  3. data/{LICENSE → LICENSE.txt} +0 -0
  4. data/README.md +13 -12
  5. data/bin/sidekiq +3 -8
  6. data/bin/sidekiqload +15 -24
  7. data/lib/sidekiq/api.rb +83 -120
  8. data/lib/sidekiq/capsule.rb +126 -0
  9. data/lib/sidekiq/cli.rb +54 -72
  10. data/lib/sidekiq/client.rb +30 -17
  11. data/lib/sidekiq/component.rb +1 -0
  12. data/lib/sidekiq/config.rb +270 -0
  13. data/lib/sidekiq/deploy.rb +62 -0
  14. data/lib/sidekiq/embedded.rb +61 -0
  15. data/lib/sidekiq/fetch.rb +11 -14
  16. data/lib/sidekiq/job.rb +375 -10
  17. data/lib/sidekiq/job_logger.rb +2 -2
  18. data/lib/sidekiq/job_retry.rb +9 -9
  19. data/lib/sidekiq/job_util.rb +4 -4
  20. data/lib/sidekiq/launcher.rb +63 -60
  21. data/lib/sidekiq/logger.rb +1 -26
  22. data/lib/sidekiq/manager.rb +9 -11
  23. data/lib/sidekiq/metrics/query.rb +3 -3
  24. data/lib/sidekiq/metrics/shared.rb +4 -3
  25. data/lib/sidekiq/metrics/tracking.rb +18 -18
  26. data/lib/sidekiq/middleware/chain.rb +7 -9
  27. data/lib/sidekiq/middleware/current_attributes.rb +8 -15
  28. data/lib/sidekiq/monitor.rb +17 -2
  29. data/lib/sidekiq/paginator.rb +2 -2
  30. data/lib/sidekiq/processor.rb +17 -26
  31. data/lib/sidekiq/rails.rb +4 -9
  32. data/lib/sidekiq/redis_client_adapter.rb +8 -47
  33. data/lib/sidekiq/redis_connection.rb +11 -111
  34. data/lib/sidekiq/scheduled.rb +20 -21
  35. data/lib/sidekiq/testing.rb +5 -33
  36. data/lib/sidekiq/transaction_aware_client.rb +4 -5
  37. data/lib/sidekiq/version.rb +2 -1
  38. data/lib/sidekiq/web/application.rb +7 -4
  39. data/lib/sidekiq/web/csrf_protection.rb +1 -1
  40. data/lib/sidekiq/web/helpers.rb +17 -16
  41. data/lib/sidekiq/web.rb +6 -17
  42. data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
  43. data/lib/sidekiq.rb +76 -274
  44. data/sidekiq.gemspec +15 -5
  45. data/web/assets/javascripts/application.js +18 -0
  46. data/web/assets/javascripts/base-charts.js +106 -0
  47. data/web/assets/javascripts/dashboard-charts.js +166 -0
  48. data/web/assets/javascripts/dashboard.js +3 -223
  49. data/web/assets/javascripts/metrics.js +90 -116
  50. data/web/assets/stylesheets/application-dark.css +4 -0
  51. data/web/assets/stylesheets/application-rtl.css +2 -91
  52. data/web/assets/stylesheets/application.css +23 -298
  53. data/web/locales/ar.yml +70 -70
  54. data/web/locales/cs.yml +62 -62
  55. data/web/locales/da.yml +52 -52
  56. data/web/locales/de.yml +65 -65
  57. data/web/locales/el.yml +2 -7
  58. data/web/locales/en.yml +76 -70
  59. data/web/locales/es.yml +68 -68
  60. data/web/locales/fa.yml +65 -65
  61. data/web/locales/fr.yml +67 -67
  62. data/web/locales/he.yml +65 -64
  63. data/web/locales/hi.yml +59 -59
  64. data/web/locales/it.yml +53 -53
  65. data/web/locales/ja.yml +64 -68
  66. data/web/locales/ko.yml +52 -52
  67. data/web/locales/lt.yml +66 -66
  68. data/web/locales/nb.yml +61 -61
  69. data/web/locales/nl.yml +52 -52
  70. data/web/locales/pl.yml +45 -45
  71. data/web/locales/pt-br.yml +59 -69
  72. data/web/locales/pt.yml +51 -51
  73. data/web/locales/ru.yml +67 -66
  74. data/web/locales/sv.yml +53 -53
  75. data/web/locales/ta.yml +60 -60
  76. data/web/locales/uk.yml +62 -61
  77. data/web/locales/ur.yml +64 -64
  78. data/web/locales/vi.yml +67 -67
  79. data/web/locales/zh-cn.yml +20 -18
  80. data/web/locales/zh-tw.yml +10 -1
  81. data/web/views/_footer.erb +5 -2
  82. data/web/views/_job_info.erb +18 -2
  83. data/web/views/_paging.erb +2 -0
  84. data/web/views/_poll_link.erb +1 -1
  85. data/web/views/busy.erb +36 -25
  86. data/web/views/dashboard.erb +36 -5
  87. data/web/views/metrics.erb +30 -19
  88. data/web/views/metrics_for_job.erb +17 -35
  89. data/web/views/morgue.erb +5 -9
  90. data/web/views/queue.erb +10 -14
  91. data/web/views/queues.erb +3 -1
  92. data/web/views/retries.erb +5 -9
  93. data/web/views/scheduled.erb +12 -13
  94. metadata +43 -34
  95. data/lib/sidekiq/delay.rb +0 -43
  96. data/lib/sidekiq/extensions/action_mailer.rb +0 -48
  97. data/lib/sidekiq/extensions/active_record.rb +0 -43
  98. data/lib/sidekiq/extensions/class_methods.rb +0 -43
  99. data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
  100. data/lib/sidekiq/metrics/deploy.rb +0 -47
  101. data/lib/sidekiq/worker.rb +0 -370
  102. data/web/assets/javascripts/graph.js +0 -16

data/lib/sidekiq/launcher.rb

@@ -1,12 +1,12 @@
  # frozen_string_literal: true

  require "sidekiq/manager"
- require "sidekiq/fetch"
+ require "sidekiq/capsule"
  require "sidekiq/scheduled"
  require "sidekiq/ring_buffer"

  module Sidekiq
-   # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
+   # The Launcher starts the Capsule Managers, the Poller thread and provides the process heartbeat.
    class Launcher
      include Sidekiq::Component

@@ -16,48 +16,55 @@ module Sidekiq
        proc { "sidekiq" },
        proc { Sidekiq::VERSION },
        proc { |me, data| data["tag"] },
-       proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
+       proc { |me, data| "[#{Processor::WORK_STATE.size} of #{me.config.total_concurrency} busy]" },
        proc { |me, data| "stopping" if me.stopping? }
      ]

-     attr_accessor :manager, :poller, :fetcher
+     attr_accessor :managers, :poller

-     def initialize(options)
-       @config = options
-       options[:fetch] ||= BasicFetch.new(options)
-       @manager = Sidekiq::Manager.new(options)
-       @poller = Sidekiq::Scheduled::Poller.new(options)
+     def initialize(config, embedded: false)
+       @config = config
+       @embedded = embedded
+       @managers = config.capsules.values.map do |cap|
+         Sidekiq::Manager.new(cap)
+       end
+       @poller = Sidekiq::Scheduled::Poller.new(@config)
        @done = false
      end

-     def run
-       @thread = safe_thread("heartbeat", &method(:start_heartbeat))
+     # Start this Sidekiq instance. If an embedding process already
+     # has a heartbeat thread, caller can use `async_beat: false`
+     # and instead have thread call Launcher#heartbeat every N seconds.
+     def run(async_beat: true)
+       Sidekiq.freeze!
+       @thread = safe_thread("heartbeat", &method(:start_heartbeat)) if async_beat
        @poller.start
-       @manager.start
+       @managers.each(&:start)
      end

      # Stops this instance from processing any more jobs,
-     #
      def quiet
+       return if @done
+
        @done = true
-       @manager.quiet
+       @managers.each(&:quiet)
        @poller.terminate
+       fire_event(:quiet, reverse: true)
      end

      # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
      def stop
        deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]

-       @done = true
-       @manager.quiet
-       @poller.terminate
-
-       @manager.stop(deadline)
+       quiet
+       stoppers = @managers.map do |mgr|
+         Thread.new do
+           mgr.stop(deadline)
+         end
+       end

-       # Requeue everything in case there was a thread which fetched a job while the process was stopped.
-       # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-       strategy = @config[:fetch]
-       strategy.bulk_requeue([], @config)
+       fire_event(:shutdown, reverse: true)
+       stoppers.each(&:join)

        clear_heartbeat
      end
@@ -66,18 +73,30 @@ module Sidekiq
        @done
      end

+     # If embedding Sidekiq, you can have the process heartbeat
+     # call this method to regularly heartbeat rather than creating
+     # a separate thread.
+     def heartbeat
+       ❤
+     end
+
      private unless $TESTING

-     BEAT_PAUSE = 5
+     BEAT_PAUSE = 10

      def start_heartbeat
        loop do
-         heartbeat
+         beat
          sleep BEAT_PAUSE
        end
        logger.info("Heartbeat stopping...")
      end

+     def beat
+       $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ") unless @embedded
+       ❤
+     end
+
      def clear_heartbeat
        flush_stats

@@ -94,12 +113,6 @@
        # best effort, ignore network errors
      end

-     def heartbeat
-       $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
-
-       ❤
-     end
-
      def flush_stats
        fails = Processor::FAILURE.reset
        procd = Processor::PROCESSED.reset
@@ -107,7 +120,7 @@

        nowdate = Time.now.utc.strftime("%Y-%m-%d")
        begin
-         Sidekiq.redis do |conn|
+         redis do |conn|
            conn.pipelined do |pipeline|
              pipeline.incrby("stat:processed", procd)
              pipeline.incrby("stat:processed:#{nowdate}", procd)
@@ -119,9 +132,7 @@
            end
          end
        rescue => ex
-         # we're exiting the process, things might be shut down so don't
-         # try to handle the exception
-         Sidekiq.logger.warn("Unable to flush stats: #{ex}")
+         logger.warn("Unable to flush stats: #{ex}")
        end
      end

@@ -130,23 +141,10 @@
        fails = procd = 0

        begin
-         fails = Processor::FAILURE.reset
-         procd = Processor::PROCESSED.reset
-         curstate = Processor::WORK_STATE.dup
-
-         nowdate = Time.now.utc.strftime("%Y-%m-%d")
+         flush_stats

+         curstate = Processor::WORK_STATE.dup
          redis do |conn|
-           conn.multi do |transaction|
-             transaction.incrby("stat:processed", procd)
-             transaction.incrby("stat:processed:#{nowdate}", procd)
-             transaction.expire("stat:processed:#{nowdate}", STATS_TTL)
-
-             transaction.incrby("stat:failed", fails)
-             transaction.incrby("stat:failed:#{nowdate}", fails)
-             transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
-           end
-
            # work is the current set of executing jobs
            work_key = "#{key}:work"
            conn.pipelined do |transaction|
@@ -163,10 +161,10 @@
        fails = procd = 0
        kb = memory_usage(::Process.pid)

-       _, exists, _, _, msg = redis { |conn|
+       _, exists, _, _, signal = redis { |conn|
          conn.multi { |transaction|
            transaction.sadd("processes", [key])
-           transaction.exists?(key)
+           transaction.exists(key)
            transaction.hmset(key, "info", to_json,
              "busy", curstate.size,
              "beat", Time.now.to_f,
@@ -179,12 +177,10 @@
        }

        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-       fire_event(:heartbeat) unless exists
+       fire_event(:heartbeat) unless exists > 0
        fire_event(:beat, oneshot: false)

-       return unless msg
-
-       ::Process.kill(msg, ::Process.pid)
+       ::Process.kill(signal, ::Process.pid) if signal && !@embedded
      rescue => e
        # ignore all redis/network issues
        logger.error("heartbeat: #{e}")
@@ -251,13 +247,20 @@
        "started_at" => Time.now.to_f,
        "pid" => ::Process.pid,
        "tag" => @config[:tag] || "",
-       "concurrency" => @config[:concurrency],
-       "queues" => @config[:queues].uniq,
-       "labels" => @config[:labels],
-       "identity" => identity
+       "concurrency" => @config.total_concurrency,
+       "queues" => @config.capsules.values.flat_map { |cap| cap.queues }.uniq,
+       "weights" => to_weights,
+       "labels" => @config[:labels].to_a,
+       "identity" => identity,
+       "version" => Sidekiq::VERSION,
+       "embedded" => @embedded
      }
    end

+   def to_weights
+     @config.capsules.values.map(&:weights)
+   end
+
    def to_json
      # this data changes infrequently so dump it to a string
      # now so we don't need to dump it every heartbeat.
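
Editor's note on the launcher changes above: with `embedded: true` and `run(async_beat: false)`, a host process can drive the heartbeat from its own timer instead of letting the Launcher spawn a thread. The supported front door for this is the new data/lib/sidekiq/embedded.rb listed in the file table, so the following is only an illustrative sketch built from the diff, not the official embedding API:

    # Illustrative only: drive the 7.x Launcher from a process that already
    # owns a periodic timer, so the internal heartbeat thread is skipped.
    require "sidekiq"
    require "sidekiq/launcher"

    launcher = Sidekiq::Launcher.new(Sidekiq.default_configuration, embedded: true)
    launcher.run(async_beat: false)  # freezes config, starts poller + capsule managers

    # Host process timer, roughly every BEAT_PAUSE (10s) per the diff:
    launcher.heartbeat

    # On shutdown: quiets capsules, fires :shutdown, waits up to the deadline:
    launcher.stop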

data/lib/sidekiq/logger.rb

@@ -31,7 +31,7 @@
      "fatal" => 4
    }
    LEVELS.default_proc = proc do |_, level|
-     Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
+     puts("Invalid log level: #{level.inspect}")
      nil
    end

@@ -70,36 +70,11 @@
    ensure
      self.local_level = old_local_level
    end
-
-   # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
-   # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
-   def add(severity, message = nil, progname = nil, &block)
-     severity ||= ::Logger::UNKNOWN
-     progname ||= @progname
-
-     return true if @logdev.nil? || severity < level
-
-     if message.nil?
-       if block
-         message = yield
-       else
-         message = progname
-         progname = @progname
-       end
-     end
-
-     @logdev.write format_message(format_severity(severity), Time.now, progname, message)
-   end
  end

  class Logger < ::Logger
    include LoggingUtils

-   def initialize(*args, **kwargs)
-     super
-     self.formatter = Sidekiq.log_formatter
-   end
-
    module Formatters
      class Base < ::Logger::Formatter
        def tid

data/lib/sidekiq/manager.rb

@@ -1,7 +1,6 @@
  # frozen_string_literal: true

  require "sidekiq/processor"
- require "sidekiq/fetch"
  require "set"

  module Sidekiq
@@ -23,19 +22,19 @@ module Sidekiq
      include Sidekiq::Component

      attr_reader :workers
+     attr_reader :capsule

-     def initialize(options = {})
-       @config = options
-       logger.debug { options.inspect }
-       @count = options[:concurrency] || 10
+     def initialize(capsule)
+       @config = @capsule = capsule
+       @count = capsule.concurrency
        raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1

        @done = false
        @workers = Set.new
+       @plock = Mutex.new
        @count.times do
          @workers << Processor.new(@config, &method(:processor_result))
        end
-       @plock = Mutex.new
      end

      def start
@@ -46,14 +45,12 @@ module Sidekiq
        return if @done
        @done = true

-       logger.info { "Terminating quiet threads" }
+       logger.info { "Terminating quiet threads for #{capsule.name} capsule" }
        @workers.each(&:terminate)
-       fire_event(:quiet, reverse: true)
      end

      def stop(deadline)
        quiet
-       fire_event(:shutdown, reverse: true)

        # some of the shutdown events can be async,
        # we don't have any way to know when they're done but
@@ -66,6 +63,8 @@ module Sidekiq
        return if @workers.empty?

        hard_shutdown
+     ensure
+       capsule.stop
      end

      def processor_result(processor, reason = nil)
@@ -105,8 +104,7 @@ module Sidekiq
        # contract says that jobs are run AT LEAST once. Process termination
        # is delayed until we're certain the jobs are back in Redis because
        # it is worse to lose a job than to run it twice.
-       strategy = @config[:fetch]
-       strategy.bulk_requeue(jobs, @config)
+       capsule.fetcher.bulk_requeue(jobs)
      end

      cleanup.each do |processor|
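
Editor's note: Manager is now built from a Capsule rather than an options hash, and requeueing goes through capsule.fetcher. For orientation, a sketch of how a capsule with its own concurrency and queues is declared; the capsule name and queue names are made up, and the DSL itself lives in the new capsule.rb/config.rb listed in the file table:

    # Illustrative capsule configuration; each capsule gets its own Manager,
    # Processor pool and fetcher, as the Launcher/Manager diffs above show.
    Sidekiq.configure_server do |config|
      config.capsule("serial") do |cap|
        cap.concurrency = 1
        cap.queues = %w[serial_queue]
      end
    end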

data/lib/sidekiq/metrics/query.rb

@@ -13,9 +13,9 @@ module Sidekiq
    # NB: all metrics and times/dates are UTC only. We specifically do not
    # support timezones.
    class Query
-     def initialize(pool: Sidekiq.redis_pool, now: Time.now)
+     def initialize(pool: nil, now: Time.now)
        @time = now.utc
-       @pool = pool
+       @pool = pool || Sidekiq.default_configuration.redis_pool
        @klass = nil
      end

@@ -123,7 +123,7 @@ module Sidekiq
      def series_avg(metric = "ms")
        series[metric].each_with_object(Hash.new(0)) do |(bucket, value), result|
          completed = series.dig("p", bucket) - series.dig("f", bucket)
-         result[bucket] = completed == 0 ? 0 : value.to_f / completed
+         result[bucket] = (completed == 0) ? 0 : value.to_f / completed
        end
      end
    end
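
Editor's note: Query no longer reads Sidekiq.redis_pool at default-argument time; the pool is resolved from Sidekiq.default_configuration when none is given. A rough sketch of both constructor forms; my_pool is a hypothetical ConnectionPool, e.g. one built for tests:

    require "sidekiq/metrics/query"

    # Default: lazily uses Sidekiq.default_configuration.redis_pool
    query = Sidekiq::Metrics::Query.new

    # Explicit pool, e.g. pointing at a test Redis
    query = Sidekiq::Metrics::Query.new(pool: my_pool, now: Time.now)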

data/lib/sidekiq/metrics/shared.rb

@@ -2,7 +2,8 @@ require "concurrent"

  module Sidekiq
    module Metrics
-     # TODO Support apps without concurrent-ruby
+     # This is the only dependency on concurrent-ruby in Sidekiq but it's
+     # mandatory for thread-safety until MRI supports atomic operations on values.
      Counter = ::Concurrent::AtomicFixnum

      # Implements space-efficient but statistically useful histogram storage.
@@ -38,7 +39,6 @@ module Sidekiq
        "65s", "100s", "150s", "225s", "335s",
        "Slow"
      ]
-
      FETCH = "GET u16 #0 GET u16 #1 GET u16 #2 GET u16 #3 \
        GET u16 #4 GET u16 #5 GET u16 #6 GET u16 #7 \
        GET u16 #8 GET u16 #9 GET u16 #10 GET u16 #11 \
@@ -46,6 +46,7 @@ module Sidekiq
        GET u16 #16 GET u16 #17 GET u16 #18 GET u16 #19 \
        GET u16 #20 GET u16 #21 GET u16 #22 GET u16 #23 \
        GET u16 #24 GET u16 #25".split
+     HISTOGRAM_TTL = 8 * 60 * 60

      def each
        buckets.each { |counter| yield counter.value }
@@ -86,7 +87,7 @@ module Sidekiq
        end

        conn.bitfield(*cmd) if cmd.size > 3
-       conn.expire(key, 86400)
+       conn.expire(key, HISTOGRAM_TTL)
        key
      end
    end

data/lib/sidekiq/metrics/tracking.rb

@@ -48,8 +48,8 @@ module Sidekiq
      end
    end

-   LONG_TERM = 90 * 24 * 60 * 60
-   MID_TERM = 7 * 24 * 60 * 60
+   # LONG_TERM = 90 * 24 * 60 * 60
+   # MID_TERM = 7 * 24 * 60 * 60
    SHORT_TERM = 8 * 60 * 60

    def flush(time = Time.now)
@@ -59,12 +59,13 @@ module Sidekiq
      return if procd == 0 && fails == 0

      now = time.utc
-     nowdate = now.strftime("%Y%m%d")
-     nowhour = now.strftime("%Y%m%d|%-H")
+     # nowdate = now.strftime("%Y%m%d")
+     # nowhour = now.strftime("%Y%m%d|%-H")
      nowmin = now.strftime("%Y%m%d|%-H:%-M")
      count = 0

      redis do |conn|
+       # persist fine-grained histogram data
        if grams.size > 0
          conn.pipelined do |pipe|
            grams.each do |_, gram|
@@ -73,15 +74,16 @@ module Sidekiq
          end
        end

+       # persist coarse grained execution count + execution millis.
+       # note as of today we don't use or do anything with the
+       # daily or hourly rollups.
        [
-         ["j", jobs, nowdate, LONG_TERM],
-         ["j", jobs, nowhour, MID_TERM],
+         # ["j", jobs, nowdate, LONG_TERM],
+         # ["j", jobs, nowhour, MID_TERM],
          ["j", jobs, nowmin, SHORT_TERM]
        ].each do |prefix, data, bucket, ttl|
-         # Quietly seed the new 7.0 stats format so migration is painless.
          conn.pipelined do |xa|
            stats = "#{prefix}|#{bucket}"
-           # logger.debug "Flushing metrics #{stats}"
            data.each_pair do |key, value|
              xa.hincrby stats, key, value
              count += 1
@@ -89,7 +91,7 @@ module Sidekiq
            xa.expire(stats, ttl)
          end
        end
-       logger.info "Flushed #{count} metrics"
+       logger.debug "Flushed #{count} metrics"
        count
      end
    end
@@ -121,14 +123,12 @@ module Sidekiq
      end
    end

-   if ENV["SIDEKIQ_METRICS_BETA"] == "1"
-     Sidekiq.configure_server do |config|
-       exec = Sidekiq::Metrics::ExecutionTracker.new(config)
-       config.server_middleware do |chain|
-         chain.add Sidekiq::Metrics::Middleware, exec
-       end
-       config.on(:beat) do
-         exec.flush
-       end
+   Sidekiq.configure_server do |config|
+     exec = Sidekiq::Metrics::ExecutionTracker.new(config)
+     config.server_middleware do |chain|
+       chain.add Sidekiq::Metrics::Middleware, exec
+     end
+     config.on(:beat) do
+       exec.flush
      end
    end

data/lib/sidekiq/middleware/chain.rb

@@ -80,15 +80,6 @@ module Sidekiq
    class Chain
      include Enumerable

-     # A unique instance of the middleware chain is created for
-     # each job executed in order to be thread-safe.
-     # @param copy [Sidekiq::Middleware::Chain] New instance of Chain
-     # @returns nil
-     def initialize_copy(copy)
-       copy.instance_variable_set(:@entries, entries.dup)
-       nil
-     end
-
      # Iterate through each middleware in the chain
      def each(&block)
        entries.each(&block)
@@ -105,6 +96,12 @@ module Sidekiq
        @entries ||= []
      end

+     def copy_for(capsule)
+       chain = Sidekiq::Middleware::Chain.new(capsule)
+       chain.instance_variable_set(:@entries, entries.dup)
+       chain
+     end
+
      # Remove all middleware matching the given Class
      # @param klass [Class]
      def remove(klass)
@@ -152,6 +149,7 @@ module Sidekiq
      def exists?(klass)
        any? { |entry| entry.klass == klass }
      end
+     alias_method :include?, :exists?

      # @return [Boolean] if the chain contains no middleware
      def empty?
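
Editor's note: the dup-based initialize_copy hook is replaced by an explicit copy_for(capsule), and exists? gains an include? alias. A sketch of the alias at a call site; MyMiddleware is a placeholder for your own middleware class:

    Sidekiq.configure_server do |config|
      config.server_middleware do |chain|
        # include? is the new alias for exists?
        chain.add MyMiddleware unless chain.include?(MyMiddleware)
      end
    end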

data/lib/sidekiq/middleware/current_attributes.rb

@@ -22,13 +22,11 @@ module Sidekiq
      end

      def call(_, job, _, _)
-       attrs = @strklass.constantize.attributes
-       if attrs.any?
-         if job.has_key?("cattr")
-           job["cattr"].merge!(attrs)
-         else
-           job["cattr"] = attrs
-         end
+       if !job.has_key?("cattr")
+         attrs = @strklass.constantize.attributes
+         # Retries can push the job N times, we don't
+         # want retries to reset cattr. #5692, #5090
+         job["cattr"] = attrs if attrs.any?
        end
        yield
      end
@@ -50,14 +48,9 @@ module Sidekiq
      end
    end

-   def self.persist(klass)
-     Sidekiq.configure_client do |config|
-       config.client_middleware.add Save, klass.to_s
-     end
-     Sidekiq.configure_server do |config|
-       config.client_middleware.add Save, klass.to_s
-       config.server_middleware.add Load, klass.to_s
-     end
+   def self.persist(klass, config = Sidekiq.default_configuration)
+     config.client_middleware.add Save, klass.to_s
+     config.server_middleware.add Load, klass.to_s
    end
  end
  end
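
Editor's note: persist now registers the Save/Load middleware on whatever config it is given (defaulting to Sidekiq.default_configuration) instead of calling configure_client/configure_server itself. A sketch of the usual Rails initializer call, assuming the Sidekiq::CurrentAttributes entry point and an illustrative Myapp::Current attributes class:

    # config/initializers/sidekiq.rb (sketch)
    require "sidekiq/middleware/current_attributes"

    # Adds Save to the client chain and Load to the server chain on the
    # default configuration, per the new persist signature above.
    Sidekiq::CurrentAttributes.persist("Myapp::Current")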

data/lib/sidekiq/monitor.rb

@@ -49,10 +49,25 @@ class Sidekiq::Monitor
    def processes
      puts "---- Processes (#{process_set.size}) ----"
      process_set.each_with_index do |process, index|
+       # Keep compatibility with legacy versions since we don't want to break sidekiqmon during rolling upgrades or downgrades.
+       #
+       # Before:
+       #   ["default", "critical"]
+       #
+       # After:
+       #   {"default" => 1, "critical" => 10}
+       queues =
+         if process["weights"]
+           process["weights"].sort_by { |queue| queue[0] }.map { |queue| queue.join(": ") }
+         else
+           process["queues"].sort
+         end
+
        puts "#{process["identity"]} #{tags_for(process)}"
        puts "  Started: #{Time.at(process["started_at"])} (#{time_ago(process["started_at"])})"
        puts "  Threads: #{process["concurrency"]} (#{process["busy"]} busy)"
-       puts "  Queues: #{split_multiline(process["queues"].sort, pad: 11)}"
+       puts "  Queues: #{split_multiline(queues, pad: 11)}"
+       puts "  Version: #{process["version"] || "Unknown"}" if process["version"] != Sidekiq::VERSION
        puts "" unless (index + 1) == process_set.size
      end
    end
@@ -101,7 +116,7 @@ class Sidekiq::Monitor
      tags = [
        process["tag"],
        process["labels"],
-       (process["quiet"] == "true" ? "quiet" : nil)
+       ((process["quiet"] == "true") ? "quiet" : nil)
      ].flatten.compact
      tags.any? ? "[#{tags.join("] [")}]" : nil
    end

data/lib/sidekiq/paginator.rb

@@ -3,7 +3,7 @@
  module Sidekiq
    module Paginator
      def page(key, pageidx = 1, page_size = 25, opts = nil)
-       current_page = pageidx.to_i < 1 ? 1 : pageidx.to_i
+       current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i
        pageidx = current_page - 1
        total_size = 0
        items = []
@@ -45,7 +45,7 @@ module Sidekiq
      end

      def page_items(items, pageidx = 1, page_size = 25)
-       current_page = pageidx.to_i < 1 ? 1 : pageidx.to_i
+       current_page = (pageidx.to_i < 1) ? 1 : pageidx.to_i
        pageidx = current_page - 1
        starting = pageidx * page_size
        items = items.to_a