sidekiq 6.4.0 → 6.5.1

Potentially problematic release: this version of sidekiq might be problematic.

Files changed (52)
  1. checksums.yaml +4 -4
  2. data/Changes.md +54 -1
  3. data/README.md +6 -1
  4. data/bin/sidekiq +3 -3
  5. data/bin/sidekiqload +70 -66
  6. data/bin/sidekiqmon +1 -1
  7. data/lib/sidekiq/.DS_Store +0 -0
  8. data/lib/sidekiq/api.rb +109 -78
  9. data/lib/sidekiq/cli.rb +47 -38
  10. data/lib/sidekiq/client.rb +42 -28
  11. data/lib/sidekiq/component.rb +64 -0
  12. data/lib/sidekiq/delay.rb +2 -2
  13. data/lib/sidekiq/extensions/action_mailer.rb +2 -2
  14. data/lib/sidekiq/extensions/active_record.rb +2 -2
  15. data/lib/sidekiq/extensions/class_methods.rb +2 -2
  16. data/lib/sidekiq/extensions/generic_proxy.rb +3 -3
  17. data/lib/sidekiq/fetch.rb +18 -16
  18. data/lib/sidekiq/job_logger.rb +15 -27
  19. data/lib/sidekiq/job_retry.rb +29 -28
  20. data/lib/sidekiq/job_util.rb +15 -9
  21. data/lib/sidekiq/launcher.rb +54 -52
  22. data/lib/sidekiq/logger.rb +8 -18
  23. data/lib/sidekiq/manager.rb +28 -25
  24. data/lib/sidekiq/middleware/chain.rb +22 -13
  25. data/lib/sidekiq/middleware/current_attributes.rb +4 -0
  26. data/lib/sidekiq/middleware/i18n.rb +6 -4
  27. data/lib/sidekiq/middleware/modules.rb +21 -0
  28. data/lib/sidekiq/monitor.rb +1 -1
  29. data/lib/sidekiq/paginator.rb +8 -8
  30. data/lib/sidekiq/processor.rb +38 -38
  31. data/lib/sidekiq/rails.rb +15 -8
  32. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  33. data/lib/sidekiq/redis_connection.rb +81 -48
  34. data/lib/sidekiq/ring_buffer.rb +29 -0
  35. data/lib/sidekiq/scheduled.rb +11 -10
  36. data/lib/sidekiq/testing/inline.rb +4 -4
  37. data/lib/sidekiq/testing.rb +37 -36
  38. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  39. data/lib/sidekiq/version.rb +1 -1
  40. data/lib/sidekiq/web/csrf_protection.rb +2 -2
  41. data/lib/sidekiq/web/helpers.rb +5 -5
  42. data/lib/sidekiq/web.rb +3 -3
  43. data/lib/sidekiq/worker.rb +20 -17
  44. data/lib/sidekiq.rb +98 -30
  45. data/web/assets/javascripts/application.js +58 -26
  46. data/web/assets/stylesheets/application.css +1 -0
  47. data/web/locales/pt-br.yml +27 -9
  48. data/web/views/_summary.erb +1 -1
  49. data/web/views/busy.erb +3 -3
  50. metadata +9 -5
  51. data/lib/sidekiq/exception_handler.rb +0 -27
  52. data/lib/sidekiq/util.rb +0 -108
data/lib/sidekiq/launcher.rb
@@ -3,11 +3,12 @@
  require "sidekiq/manager"
  require "sidekiq/fetch"
  require "sidekiq/scheduled"
+ require "sidekiq/ring_buffer"

  module Sidekiq
  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
  class Launcher
- include Util
+ include Sidekiq::Component

  STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years

@@ -15,18 +16,18 @@ module Sidekiq
  proc { "sidekiq" },
  proc { Sidekiq::VERSION },
  proc { |me, data| data["tag"] },
- proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+ proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
  proc { |me, data| "stopping" if me.stopping? }
  ]

  attr_accessor :manager, :poller, :fetcher

  def initialize(options)
+ @config = options
  options[:fetch] ||= BasicFetch.new(options)
  @manager = Sidekiq::Manager.new(options)
- @poller = Sidekiq::Scheduled::Poller.new
+ @poller = Sidekiq::Scheduled::Poller.new(options)
  @done = false
- @options = options
  end

  def run
@@ -43,11 +44,9 @@ module Sidekiq
  @poller.terminate
  end

- # Shuts down the process. This method does not
- # return until all work is complete and cleaned up.
- # It can take up to the timeout to complete.
+ # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
  def stop
- deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
+ deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]

  @done = true
  @manager.quiet
@@ -55,10 +54,10 @@ module Sidekiq

  @manager.stop(deadline)

- # Requeue everything in case there was a worker who grabbed work while stopped
+ # Requeue everything in case there was a thread which fetched a job while the process was stopped.
  # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
- strategy = @options[:fetch]
- strategy.bulk_requeue([], @options)
+ strategy = @config[:fetch]
+ strategy.bulk_requeue([], @config)

  clear_heartbeat
  end
@@ -76,17 +75,17 @@ module Sidekiq
  heartbeat
  sleep BEAT_PAUSE
  end
- Sidekiq.logger.info("Heartbeat stopping...")
+ logger.info("Heartbeat stopping...")
  end

  def clear_heartbeat
  # Remove record from Redis since we are shutting down.
  # Note we don't stop the heartbeat thread; if the process
  # doesn't actually exit, it'll reappear in the Web UI.
- Sidekiq.redis do |conn|
- conn.pipelined do
- conn.srem("processes", identity)
- conn.unlink("#{identity}:workers")
+ redis do |conn|
+ conn.pipelined do |pipeline|
+ pipeline.srem("processes", identity)
+ pipeline.unlink("#{identity}:work")
  end
  end
  rescue
@@ -107,14 +106,14 @@ module Sidekiq
  nowdate = Time.now.utc.strftime("%Y-%m-%d")
  begin
  Sidekiq.redis do |conn|
- conn.pipelined do
- conn.incrby("stat:processed", procd)
- conn.incrby("stat:processed:#{nowdate}", procd)
- conn.expire("stat:processed:#{nowdate}", STATS_TTL)
-
- conn.incrby("stat:failed", fails)
- conn.incrby("stat:failed:#{nowdate}", fails)
- conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+ conn.pipelined do |pipeline|
+ pipeline.incrby("stat:processed", procd)
+ pipeline.incrby("stat:processed:#{nowdate}", procd)
+ pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+ pipeline.incrby("stat:failed", fails)
+ pipeline.incrby("stat:failed:#{nowdate}", fails)
+ pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
  end
  end
  rescue => ex
@@ -132,26 +131,29 @@ module Sidekiq
  begin
  fails = Processor::FAILURE.reset
  procd = Processor::PROCESSED.reset
- curstate = Processor::WORKER_STATE.dup
+ curstate = Processor::WORK_STATE.dup

- workers_key = "#{key}:workers"
  nowdate = Time.now.utc.strftime("%Y-%m-%d")

- Sidekiq.redis do |conn|
- conn.multi do
- conn.incrby("stat:processed", procd)
- conn.incrby("stat:processed:#{nowdate}", procd)
- conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+ redis do |conn|
+ conn.multi do |transaction|
+ transaction.incrby("stat:processed", procd)
+ transaction.incrby("stat:processed:#{nowdate}", procd)
+ transaction.expire("stat:processed:#{nowdate}", STATS_TTL)

- conn.incrby("stat:failed", fails)
- conn.incrby("stat:failed:#{nowdate}", fails)
- conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+ transaction.incrby("stat:failed", fails)
+ transaction.incrby("stat:failed:#{nowdate}", fails)
+ transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
+ end

- conn.unlink(workers_key)
+ # work is the current set of executing jobs
+ work_key = "#{key}:work"
+ conn.pipelined do |transaction|
+ transaction.unlink(work_key)
  curstate.each_pair do |tid, hash|
- conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
+ transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
  end
- conn.expire(workers_key, 60)
+ transaction.expire(work_key, 60)
  end
  end

@@ -160,18 +162,18 @@ module Sidekiq
  fails = procd = 0
  kb = memory_usage(::Process.pid)

- _, exists, _, _, msg = Sidekiq.redis { |conn|
- conn.multi {
- conn.sadd("processes", key)
- conn.exists?(key)
- conn.hmset(key, "info", to_json,
+ _, exists, _, _, msg = redis { |conn|
+ conn.multi { |transaction|
+ transaction.sadd("processes", key)
+ transaction.exists?(key)
+ transaction.hmset(key, "info", to_json,
  "busy", curstate.size,
  "beat", Time.now.to_f,
  "rtt_us", rtt,
- "quiet", @done,
+ "quiet", @done.to_s,
  "rss", kb)
- conn.expire(key, 60)
- conn.rpop("#{key}-signals")
+ transaction.expire(key, 60)
+ transaction.rpop("#{key}-signals")
  }
  }

@@ -198,7 +200,7 @@ module Sidekiq

  def check_rtt
  a = b = 0
- Sidekiq.redis do |x|
+ redis do |x|
  a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
  x.ping
  b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
@@ -209,12 +211,12 @@ module Sidekiq
  # Workable is < 10,000µs
  # Log a warning if it's a disaster.
  if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
- Sidekiq.logger.warn <<~EOM
+ logger.warn <<~EOM
  Your Redis network connection is performing extremely poorly.
  Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
  Ensure Redis is running in the same AZ or datacenter as Sidekiq.
  If these values are close to 100,000, that means your Sidekiq process may be
- CPU overloaded; see https://github.com/mperham/sidekiq/discussions/5039
+ CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
  EOM
  RTT_READINGS.reset
  end
@@ -246,10 +248,10 @@ module Sidekiq
  "hostname" => hostname,
  "started_at" => Time.now.to_f,
  "pid" => ::Process.pid,
- "tag" => @options[:tag] || "",
- "concurrency" => @options[:concurrency],
- "queues" => @options[:queues].uniq,
- "labels" => @options[:labels],
+ "tag" => @config[:tag] || "",
+ "concurrency" => @config[:concurrency],
+ "queues" => @config[:queues].uniq,
+ "labels" => @config[:labels],
  "identity" => identity
  }
  end
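
For context on the pipelining changes above: newer redis-rb versions deprecate issuing commands on the outer connection inside `pipelined`/`multi` blocks, which is why every call now goes through the yielded `pipeline`/`transaction` object. A minimal standalone sketch of the same pattern, assuming a local Redis and redis-rb 4.6 or newer:

require "redis"

redis = Redis.new(url: "redis://localhost:6379/0") # assumed local instance

redis.pipelined do |pipeline|
  # Commands go to the yielded pipeline object, not to `redis` itself;
  # all replies come back together once the block finishes.
  pipeline.incrby("stat:processed", 10)
  pipeline.expire("stat:processed", 60)
end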
data/lib/sidekiq/logger.rb
@@ -16,6 +16,10 @@ module Sidekiq
  def self.current
  Thread.current[:sidekiq_context] ||= {}
  end
+
+ def self.add(k, v)
+ current[k] = v
+ end
  end

  module LoggingUtils
@@ -31,24 +35,10 @@
  nil
  end

- def debug?
- level <= 0
- end
-
- def info?
- level <= 1
- end
-
- def warn?
- level <= 2
- end
-
- def error?
- level <= 3
- end
-
- def fatal?
- level <= 4
+ LEVELS.each do |level, numeric_level|
+ define_method("#{level}?") do
+ local_level.nil? ? super() : local_level <= numeric_level
+ end
  end

  def local_level
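
The logger change above replaces five hand-written predicates with one metaprogramming loop. A simplified sketch of that pattern outside Sidekiq (the LEVELS hash and class name here are illustrative, not Sidekiq's API):

class QuietLogger
  LEVELS = {"debug" => 0, "info" => 1, "warn" => 2, "error" => 3, "fatal" => 4}

  attr_accessor :level # numeric severity: 0 = debug .. 4 = fatal

  LEVELS.each do |name, numeric_level|
    # One loop defines debug?, info?, warn?, error? and fatal?,
    # each comparing the current level against its numeric severity.
    define_method("#{name}?") { level <= numeric_level }
  end
end

log = QuietLogger.new
log.level = 2  # warn
log.warn?      # => true
log.debug?     # => false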
data/lib/sidekiq/manager.rb
@@ -1,6 +1,5 @@
  # frozen_string_literal: true

- require "sidekiq/util"
  require "sidekiq/processor"
  require "sidekiq/fetch"
  require "set"
@@ -21,37 +20,34 @@ module Sidekiq
  # the shutdown process. The other tasks are performed by other threads.
  #
  class Manager
- include Util
+ include Sidekiq::Component

  attr_reader :workers
- attr_reader :options

  def initialize(options = {})
+ @config = options
  logger.debug { options.inspect }
- @options = options
  @count = options[:concurrency] || 10
  raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1

  @done = false
  @workers = Set.new
  @count.times do
- @workers << Processor.new(self, options)
+ @workers << Processor.new(@config, &method(:processor_result))
  end
  @plock = Mutex.new
  end

  def start
- @workers.each do |x|
- x.start
- end
+ @workers.each(&:start)
  end

  def quiet
  return if @done
  @done = true

- logger.info { "Terminating quiet workers" }
- @workers.each { |x| x.terminate }
+ logger.info { "Terminating quiet threads" }
+ @workers.each(&:terminate)
  fire_event(:quiet, reverse: true)
  end

@@ -65,24 +61,18 @@ module Sidekiq
  sleep PAUSE_TIME
  return if @workers.empty?

- logger.info { "Pausing to allow workers to finish..." }
+ logger.info { "Pausing to allow jobs to finish..." }
  wait_for(deadline) { @workers.empty? }
  return if @workers.empty?

  hard_shutdown
  end

- def processor_stopped(processor)
- @plock.synchronize do
- @workers.delete(processor)
- end
- end
-
- def processor_died(processor, reason)
+ def processor_result(processor, reason = nil)
  @plock.synchronize do
  @workers.delete(processor)
  unless @done
- p = Processor.new(self, options)
+ p = Processor.new(@config, &method(:processor_result))
  @workers << p
  p.start
  end
@@ -96,7 +86,7 @@ module Sidekiq
  private

  def hard_shutdown
- # We've reached the timeout and we still have busy workers.
+ # We've reached the timeout and we still have busy threads.
  # They must die but their jobs shall live on.
  cleanup = nil
  @plock.synchronize do
@@ -106,17 +96,17 @@ module Sidekiq
  if cleanup.size > 0
  jobs = cleanup.map { |p| p.job }.compact

- logger.warn { "Terminating #{cleanup.size} busy worker threads" }
- logger.warn { "Work still in progress #{jobs.inspect}" }
+ logger.warn { "Terminating #{cleanup.size} busy threads" }
+ logger.debug { "Jobs still in progress #{jobs.inspect}" }

  # Re-enqueue unfinished jobs
  # NOTE: You may notice that we may push a job back to redis before
- # the worker thread is terminated. This is ok because Sidekiq's
+ # the thread is terminated. This is ok because Sidekiq's
  # contract says that jobs are run AT LEAST once. Process termination
  # is delayed until we're certain the jobs are back in Redis because
  # it is worse to lose a job than to run it twice.
- strategy = @options[:fetch]
- strategy.bulk_requeue(jobs, @options)
+ strategy = @config[:fetch]
+ strategy.bulk_requeue(jobs, @config)
  end

  cleanup.each do |processor|
@@ -129,5 +119,18 @@ module Sidekiq
  deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
  wait_for(deadline) { @workers.empty? }
  end
+
+ # hack for quicker development / testing environment #2774
+ PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
+
+ # Wait for the orblock to be true or the deadline passed.
+ def wait_for(deadline, &condblock)
+ remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+ while remaining > PAUSE_TIME
+ return if condblock.call
+ sleep PAUSE_TIME
+ remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+ end
+ end
  end
  end
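
One notable change in Manager above: a Processor no longer receives the Manager itself; it receives the config plus a result callback passed with `&method(:processor_result)`, which replaces the separate `processor_stopped`/`processor_died` hooks. A self-contained sketch of that callback style (Supervisor and Worker are illustrative names, not Sidekiq classes):

class Worker
  def initialize(config, &result_callback)
    @config = config
    @result_callback = result_callback
  end

  def run
    # Pretend to do some work, then report back to whoever created us.
    @result_callback.call(self, nil)   # finished cleanly
  rescue => ex
    @result_callback.call(self, ex)    # died; pass the reason along
  end
end

class Supervisor
  def initialize(config)
    @config = config
    @workers = Array.new(3) { Worker.new(config, &method(:worker_result)) }
  end

  def start
    @workers.each(&:run)
  end

  private

  # A single callback handles both clean exits and failures.
  def worker_result(worker, reason = nil)
    puts(reason ? "worker died: #{reason.class}" : "worker finished")
  end
end

Supervisor.new(concurrency: 3).start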
data/lib/sidekiq/middleware/chain.rb
@@ -1,5 +1,7 @@
  # frozen_string_literal: true

+ require "sidekiq/middleware/modules"
+
  module Sidekiq
  # Middleware is code configured to run before/after
  # a message is processed. It is patterned after Rack
@@ -44,10 +46,12 @@ module Sidekiq
  # This is an example of a minimal server middleware:
  #
  # class MyServerHook
- # def call(worker_instance, msg, queue)
- # puts "Before work"
+ # include Sidekiq::ServerMiddleware
+ # def call(job_instance, msg, queue)
+ # logger.info "Before job"
+ # redis {|conn| conn.get("foo") } # do something in Redis
  # yield
- # puts "After work"
+ # logger.info "After job"
  # end
  # end
  #
@@ -56,10 +60,11 @@ module Sidekiq
  # to Redis:
  #
  # class MyClientHook
- # def call(worker_class, msg, queue, redis_pool)
- # puts "Before push"
+ # include Sidekiq::ClientMiddleware
+ # def call(job_class, msg, queue, redis_pool)
+ # logger.info "Before push"
  # result = yield
- # puts "After push"
+ # logger.info "After push"
  # result
  # end
  # end
@@ -76,7 +81,8 @@ module Sidekiq
  entries.each(&block)
  end

- def initialize
+ def initialize(config = nil)
+ @config = config
  @entries = nil
  yield self if block_given?
  end
@@ -91,24 +97,24 @@ module Sidekiq

  def add(klass, *args)
  remove(klass)
- entries << Entry.new(klass, *args)
+ entries << Entry.new(@config, klass, *args)
  end

  def prepend(klass, *args)
  remove(klass)
- entries.insert(0, Entry.new(klass, *args))
+ entries.insert(0, Entry.new(@config, klass, *args))
  end

  def insert_before(oldklass, newklass, *args)
  i = entries.index { |entry| entry.klass == newklass }
- new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
+ new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
  i = entries.index { |entry| entry.klass == oldklass } || 0
  entries.insert(i, new_entry)
  end

  def insert_after(oldklass, newklass, *args)
  i = entries.index { |entry| entry.klass == newklass }
- new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
+ new_entry = i.nil? ? Entry.new(@config, newklass, *args) : entries.delete_at(i)
  i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1
  entries.insert(i + 1, new_entry)
  end
@@ -149,13 +155,16 @@ module Sidekiq
  class Entry
  attr_reader :klass

- def initialize(klass, *args)
+ def initialize(config, klass, *args)
+ @config = config
  @klass = klass
  @args = args
  end

  def make_new
- @klass.new(*@args)
+ x = @klass.new(*@args)
+ x.config = @config if @config && x.respond_to?(:config=)
+ x
  end
  end
  end
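
From a user's point of view, middleware registration is unchanged; the new `@config` plumbing only affects how `Entry#make_new` builds each instance, injecting the config when the middleware defines `config=` (i.e. includes one of the modules added later in this release). A typical registration, reusing the MyServerHook example from the comment block above:

Sidekiq.configure_server do |config|
  config.server_middleware do |chain|
    # The chain stores an Entry holding the class (and any extra args);
    # each job execution gets a fresh instance via Entry#make_new.
    chain.add MyServerHook
  end
end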
data/lib/sidekiq/middleware/current_attributes.rb
@@ -15,6 +15,8 @@ module Sidekiq
  #
  module CurrentAttributes
  class Save
+ include Sidekiq::ClientMiddleware
+
  def initialize(cattr)
  @klass = cattr
  end
@@ -31,6 +33,8 @@ module Sidekiq
  end

  class Load
+ include Sidekiq::ServerMiddleware
+
  def initialize(cattr)
  @klass = cattr
  end
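
Save and Load are the client/server halves of the CurrentAttributes middleware, now opting into the new middleware modules. They are typically wired up with `Sidekiq::CurrentAttributes.persist`; a sketch of the usual Rails initializer, where `Myapp::Current` is an assumed ActiveSupport::CurrentAttributes subclass in the host application:

require "sidekiq/middleware/current_attributes"

# config/initializers/sidekiq.rb in the host app (illustrative):
# registers Save on the client chain and Load on the server chain.
Sidekiq::CurrentAttributes.persist("Myapp::Current")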
data/lib/sidekiq/middleware/i18n.rb
@@ -10,16 +10,18 @@ module Sidekiq::Middleware::I18n
  # Get the current locale and store it in the message
  # to be sent to Sidekiq.
  class Client
- def call(_worker, msg, _queue, _redis)
- msg["locale"] ||= I18n.locale
+ include Sidekiq::ClientMiddleware
+ def call(_jobclass, job, _queue, _redis)
+ job["locale"] ||= I18n.locale
  yield
  end
  end

  # Pull the msg locale out and set the current thread to use it.
  class Server
- def call(_worker, msg, _queue, &block)
- I18n.with_locale(msg.fetch("locale", I18n.default_locale), &block)
+ include Sidekiq::ServerMiddleware
+ def call(_jobclass, job, _queue, &block)
+ I18n.with_locale(job.fetch("locale", I18n.default_locale), &block)
  end
  end
  end
data/lib/sidekiq/middleware/modules.rb (new file)
@@ -0,0 +1,21 @@
+ module Sidekiq
+ # Server-side middleware must import this Module in order
+ # to get access to server resources during `call`.
+ module ServerMiddleware
+ attr_accessor :config
+ def redis_pool
+ config.redis_pool
+ end
+
+ def logger
+ config.logger
+ end
+
+ def redis(&block)
+ config.redis(&block)
+ end
+ end
+
+ # no difference for now
+ ClientMiddleware = ServerMiddleware
+ end
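
With this new file, custom server middleware that includes `Sidekiq::ServerMiddleware` gets `logger`, `redis` and `redis_pool` helpers backed by the config object injected in `Entry#make_new`, instead of reaching for the global `Sidekiq` module. A sketch of a middleware written against it (the class name, Redis key and log message are illustrative):

class JobCounter
  include Sidekiq::ServerMiddleware

  def call(job_instance, job_payload, queue)
    # `redis` and `logger` come from the included module.
    redis { |conn| conn.incr("jobs_seen:#{queue}") }
    logger.info { "starting #{job_payload["class"]} on #{queue}" }
    yield
  end
end

Sidekiq.configure_server do |config|
  config.server_middleware { |chain| chain.add JobCounter }
end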
data/lib/sidekiq/monitor.rb
@@ -17,7 +17,7 @@ class Sidekiq::Monitor
  end
  send(section)
  rescue => e
- puts "Couldn't get status: #{e}"
+ abort "Couldn't get status: #{e}"
  end

  def all
data/lib/sidekiq/paginator.rb
@@ -16,22 +16,22 @@ module Sidekiq

  case type
  when "zset"
- total_size, items = conn.multi {
- conn.zcard(key)
+ total_size, items = conn.multi { |transaction|
+ transaction.zcard(key)
  if rev
- conn.zrevrange(key, starting, ending, with_scores: true)
+ transaction.zrevrange(key, starting, ending, withscores: true)
  else
- conn.zrange(key, starting, ending, with_scores: true)
+ transaction.zrange(key, starting, ending, withscores: true)
  end
  }
  [current_page, total_size, items]
  when "list"
- total_size, items = conn.multi {
- conn.llen(key)
+ total_size, items = conn.multi { |transaction|
+ transaction.llen(key)
  if rev
- conn.lrange(key, -ending - 1, -starting - 1)
+ transaction.lrange(key, -ending - 1, -starting - 1)
  else
- conn.lrange(key, starting, ending)
+ transaction.lrange(key, starting, ending)
  end
  }
  items.reverse! if rev