sidekiq 6.4.1 → 6.5.10

Files changed (67)
  1. checksums.yaml +4 -4
  2. data/Changes.md +107 -1
  3. data/README.md +1 -1
  4. data/bin/sidekiqload +18 -12
  5. data/lib/sidekiq/api.rb +222 -71
  6. data/lib/sidekiq/cli.rb +51 -37
  7. data/lib/sidekiq/client.rb +27 -28
  8. data/lib/sidekiq/component.rb +65 -0
  9. data/lib/sidekiq/delay.rb +1 -1
  10. data/lib/sidekiq/extensions/generic_proxy.rb +1 -1
  11. data/lib/sidekiq/fetch.rb +18 -16
  12. data/lib/sidekiq/job_retry.rb +73 -52
  13. data/lib/sidekiq/job_util.rb +15 -9
  14. data/lib/sidekiq/launcher.rb +37 -33
  15. data/lib/sidekiq/logger.rb +5 -19
  16. data/lib/sidekiq/manager.rb +28 -25
  17. data/lib/sidekiq/metrics/deploy.rb +47 -0
  18. data/lib/sidekiq/metrics/query.rb +153 -0
  19. data/lib/sidekiq/metrics/shared.rb +94 -0
  20. data/lib/sidekiq/metrics/tracking.rb +134 -0
  21. data/lib/sidekiq/middleware/chain.rb +82 -38
  22. data/lib/sidekiq/middleware/current_attributes.rb +18 -12
  23. data/lib/sidekiq/middleware/i18n.rb +6 -4
  24. data/lib/sidekiq/middleware/modules.rb +21 -0
  25. data/lib/sidekiq/monitor.rb +2 -2
  26. data/lib/sidekiq/paginator.rb +11 -3
  27. data/lib/sidekiq/processor.rb +47 -41
  28. data/lib/sidekiq/rails.rb +25 -8
  29. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  30. data/lib/sidekiq/redis_connection.rb +80 -49
  31. data/lib/sidekiq/ring_buffer.rb +29 -0
  32. data/lib/sidekiq/scheduled.rb +53 -24
  33. data/lib/sidekiq/testing/inline.rb +4 -4
  34. data/lib/sidekiq/testing.rb +37 -36
  35. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  36. data/lib/sidekiq/version.rb +1 -1
  37. data/lib/sidekiq/web/action.rb +3 -3
  38. data/lib/sidekiq/web/application.rb +21 -5
  39. data/lib/sidekiq/web/csrf_protection.rb +2 -2
  40. data/lib/sidekiq/web/helpers.rb +20 -7
  41. data/lib/sidekiq/web.rb +5 -1
  42. data/lib/sidekiq/worker.rb +24 -16
  43. data/lib/sidekiq.rb +106 -31
  44. data/sidekiq.gemspec +2 -2
  45. data/web/assets/javascripts/application.js +59 -26
  46. data/web/assets/javascripts/chart.min.js +13 -0
  47. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  48. data/web/assets/javascripts/dashboard.js +0 -17
  49. data/web/assets/javascripts/graph.js +16 -0
  50. data/web/assets/javascripts/metrics.js +262 -0
  51. data/web/assets/stylesheets/application.css +45 -3
  52. data/web/locales/el.yml +43 -19
  53. data/web/locales/en.yml +7 -0
  54. data/web/locales/ja.yml +7 -0
  55. data/web/locales/pt-br.yml +27 -9
  56. data/web/locales/zh-cn.yml +36 -11
  57. data/web/locales/zh-tw.yml +32 -7
  58. data/web/views/_nav.erb +1 -1
  59. data/web/views/_summary.erb +1 -1
  60. data/web/views/busy.erb +9 -4
  61. data/web/views/dashboard.erb +1 -0
  62. data/web/views/metrics.erb +69 -0
  63. data/web/views/metrics_for_job.erb +87 -0
  64. data/web/views/queue.erb +5 -1
  65. metadata +34 -9
  66. data/lib/sidekiq/exception_handler.rb +0 -27
  67. data/lib/sidekiq/util.rb +0 -108

data/lib/sidekiq/redis_connection.rb

@@ -5,8 +5,79 @@ require "redis"
 require "uri"
 
 module Sidekiq
-  class RedisConnection
+  module RedisConnection
+    class RedisAdapter
+      BaseError = Redis::BaseError
+      CommandError = Redis::CommandError
+
+      def initialize(options)
+        warn("Usage of the 'redis' gem within Sidekiq itself is deprecated, Sidekiq 7.0 will only use the new, simpler 'redis-client' gem", caller) if ENV["SIDEKIQ_REDIS_CLIENT"] == "1"
+        @options = options
+      end
+
+      def new_client
+        namespace = @options[:namespace]
+
+        client = Redis.new client_opts(@options)
+        if namespace
+          begin
+            require "redis/namespace"
+            Redis::Namespace.new(namespace, redis: client)
+          rescue LoadError
+            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile." \
+                                 "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
+            exit(-127)
+          end
+        else
+          client
+        end
+      end
+
+      private
+
+      def client_opts(options)
+        opts = options.dup
+        if opts[:namespace]
+          opts.delete(:namespace)
+        end
+
+        if opts[:network_timeout]
+          opts[:timeout] = opts[:network_timeout]
+          opts.delete(:network_timeout)
+        end
+
+        # Issue #3303, redis-rb will silently retry an operation.
+        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
+        # is performed twice but I believe this is much, much rarer
+        # than the reconnect silently fixing a problem; we keep it
+        # on by default.
+        opts[:reconnect_attempts] ||= 1
+
+        opts
+      end
+    end
+
+    @adapter = RedisAdapter
+
     class << self
+      attr_reader :adapter
+
+      # RedisConnection.adapter = :redis
+      # RedisConnection.adapter = :redis_client
+      def adapter=(adapter)
+        raise "no" if adapter == self
+        result = case adapter
+        when :redis
+          RedisAdapter
+        when Class
+          adapter
+        else
+          require "sidekiq/#{adapter}_adapter"
+          nil
+        end
+        @adapter = result if result
+      end
+
       def create(options = {})
         symbolized_options = options.transform_keys(&:to_sym)
 
@@ -19,26 +90,27 @@ module Sidekiq
         elsif Sidekiq.server?
           # Give ourselves plenty of connections. pool is lazy
           # so we won't create them until we need them.
-          Sidekiq.options[:concurrency] + 5
+          Sidekiq[:concurrency] + 5
         elsif ENV["RAILS_MAX_THREADS"]
           Integer(ENV["RAILS_MAX_THREADS"])
         else
           5
         end
 
-        verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?
+        verify_sizing(size, Sidekiq[:concurrency]) if Sidekiq.server?
 
         pool_timeout = symbolized_options[:pool_timeout] || 1
         log_info(symbolized_options)
 
+        redis_config = adapter.new(symbolized_options)
         ConnectionPool.new(timeout: pool_timeout, size: size) do
-          build_client(symbolized_options)
+          redis_config.new_client
         end
       end
 
       private
 
-      # Sidekiq needs a lot of concurrent Redis connections.
+      # Sidekiq needs many concurrent Redis connections.
       #
       # We need a connection for each Processor.
       # We need a connection for Pro's real-time change listener
@@ -47,48 +119,7 @@ module Sidekiq
       # - enterprise's leader election
       # - enterprise's cron support
       def verify_sizing(size, concurrency)
-        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
-      end
-
-      def build_client(options)
-        namespace = options[:namespace]
-
-        client = Redis.new client_opts(options)
-        if namespace
-          begin
-            require "redis/namespace"
-            Redis::Namespace.new(namespace, redis: client)
-          rescue LoadError
-            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile." \
-                                 "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
-            exit(-127)
-          end
-        else
-          client
-        end
-      end
-
-      def client_opts(options)
-        opts = options.dup
-        if opts[:namespace]
-          opts.delete(:namespace)
-        end
-
-        if opts[:network_timeout]
-          opts[:timeout] = opts[:network_timeout]
-          opts.delete(:network_timeout)
-        end
-
-        opts[:driver] ||= Redis::Connection.drivers.last || "ruby"
-
-        # Issue #3303, redis-rb will silently retry an operation.
-        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
-        # is performed twice but I believe this is much, much rarer
-        # than the reconnect silently fixing a problem; we keep it
-        # on by default.
-        opts[:reconnect_attempts] ||= 1
-
-        opts
+        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
       end
 
       def log_info(options)
@@ -110,9 +141,9 @@ module Sidekiq
           sentinel[:password] = redacted if sentinel[:password]
         end
         if Sidekiq.server?
-          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with redis options #{scrubbed_options}")
+          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with #{adapter.name} options #{scrubbed_options}")
         else
-          Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}")
+          Sidekiq.logger.debug("#{Sidekiq::NAME} client with #{adapter.name} options #{scrubbed_options}")
         end
       end
 
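
The adapter hook above is the seam 6.5 uses to prepare the migration to the redis-client gem. A minimal sketch of wiring it up from an initializer; the config/initializers/sidekiq.rb path is an assumption, and the `:redis_client` symbol simply exercises the `require "sidekiq/#{adapter}_adapter"` branch of `adapter=` shown above, which loads the bundled redis_client_adapter.rb:

    # config/initializers/sidekiq.rb (sketch, not required configuration)
    # Default behaviour: keep the redis-rb based adapter.
    Sidekiq::RedisConnection.adapter = :redis

    # Opt in to the new redis-client gem instead (assumes redis-client is in
    # the Gemfile; SIDEKIQ_REDIS_CLIENT=1 is also what makes RedisAdapter above
    # print its deprecation warning).
    # Sidekiq::RedisConnection.adapter = :redis_client

Whichever adapter is used, verify_sizing above still requires the server's pool to hold at least concurrency + 2 connections.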
data/lib/sidekiq/ring_buffer.rb

@@ -0,0 +1,29 @@
+require "forwardable"
+
+module Sidekiq
+  class RingBuffer
+    include Enumerable
+    extend Forwardable
+    def_delegators :@buf, :[], :each, :size
+
+    def initialize(size, default = 0)
+      @size = size
+      @buf = Array.new(size, default)
+      @index = 0
+    end
+
+    def <<(element)
+      @buf[@index % @size] = element
+      @index += 1
+      element
+    end
+
+    def buffer
+      @buf
+    end
+
+    def reset(default = 0)
+      @buf.fill(default)
+    end
+  end
+end
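
Sidekiq::RingBuffer is a small fixed-size buffer that overwrites its oldest slot once it wraps around. An illustrative session, derived only from the methods above:

    # Illustrative only: a three-slot buffer wraps around on the fourth push.
    buf = Sidekiq::RingBuffer.new(3)
    buf << 1
    buf << 2
    buf << 3
    buf << 4          # index 3 % size 3 == 0, so slot 0 is overwritten
    buf.buffer        # => [4, 2, 3]
    buf.size          # => 3 (delegated to the underlying array)
    buf.reset         # => [0, 0, 0], every slot back to the default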
data/lib/sidekiq/scheduled.rb

@@ -1,8 +1,7 @@
 # frozen_string_literal: true
 
 require "sidekiq"
-require "sidekiq/util"
-require "sidekiq/api"
+require "sidekiq/component"
 
 module Sidekiq
   module Scheduled
@@ -52,8 +51,8 @@ module Sidekiq
           @lua_zpopbyscore_sha = raw_conn.script(:load, LUA_ZPOPBYSCORE)
         end
 
-        conn.evalsha(@lua_zpopbyscore_sha, keys: keys, argv: argv)
-      rescue Redis::CommandError => e
+        conn.evalsha(@lua_zpopbyscore_sha, keys, argv)
+      rescue RedisConnection.adapter::CommandError => e
         raise unless e.message.start_with?("NOSCRIPT")
 
         @lua_zpopbyscore_sha = nil
@@ -67,12 +66,13 @@ module Sidekiq
     # just pops the job back onto its original queue so the
     # workers can pick it up like any other job.
     class Poller
-      include Util
+      include Sidekiq::Component
 
       INITIAL_WAIT = 10
 
-      def initialize
-        @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
+      def initialize(options)
+        @config = options
+        @enq = (options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
         @sleeper = ConnectionPool::TimedStack.new
         @done = false
         @thread = nil
@@ -100,7 +100,7 @@ module Sidekiq
             enqueue
             wait
           end
-          Sidekiq.logger.info("Scheduler exiting...")
+          logger.info("Scheduler exiting...")
         }
       end
 
@@ -147,13 +147,16 @@ module Sidekiq
         # As we run more processes, the scheduling interval average will approach an even spread
         # between 0 and poll interval so we don't need this artifical boost.
         #
-        if process_count < 10
+        count = process_count
+        interval = poll_interval_average(count)
+
+        if count < 10
           # For small clusters, calculate a random interval that is ±50% the desired average.
-          poll_interval_average * rand + poll_interval_average.to_f / 2
+          interval * rand + interval.to_f / 2
         else
           # With 10+ processes, we should have enough randomness to get decent polling
           # across the entire timespan
-          poll_interval_average * rand
+          interval * rand
         end
       end
 
@@ -170,38 +173,64 @@ module Sidekiq
       # the same time: the thundering herd problem.
       #
       # We only do this if poll_interval_average is unset (the default).
-      def poll_interval_average
-        Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
+      def poll_interval_average(count)
+        @config[:poll_interval_average] || scaled_poll_interval(count)
       end
 
       # Calculates an average poll interval based on the number of known Sidekiq processes.
       # This minimizes a single point of failure by dispersing check-ins but without taxing
       # Redis if you run many Sidekiq processes.
-      def scaled_poll_interval
-        process_count * Sidekiq.options[:average_scheduled_poll_interval]
+      def scaled_poll_interval(process_count)
+        process_count * @config[:average_scheduled_poll_interval]
       end
 
       def process_count
-        # The work buried within Sidekiq::ProcessSet#cleanup can be
-        # expensive at scale. Cut it down by 90% with this counter.
-        # NB: This method is only called by the scheduler thread so we
-        # don't need to worry about the thread safety of +=.
-        pcount = Sidekiq::ProcessSet.new(@count_calls % 10 == 0).size
+        pcount = Sidekiq.redis { |conn| conn.scard("processes") }
         pcount = 1 if pcount == 0
-        @count_calls += 1
         pcount
       end
 
+      # A copy of Sidekiq::ProcessSet#cleanup because server
+      # should never depend on sidekiq/api.
+      def cleanup
+        # dont run cleanup more than once per minute
+        return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }
+
+        count = 0
+        Sidekiq.redis do |conn|
+          procs = conn.sscan_each("processes").to_a
+          heartbeats = conn.pipelined { |pipeline|
+            procs.each do |key|
+              pipeline.hget(key, "info")
+            end
+          }
+
+          # the hash named key has an expiry of 60 seconds.
+          # if it's not found, that means the process has not reported
+          # in to Redis and probably died.
+          to_prune = procs.select.with_index { |proc, i|
+            heartbeats[i].nil?
+          }
+          count = conn.srem("processes", to_prune) unless to_prune.empty?
+        end
+        count
+      end
+
       def initial_wait
-        # Have all processes sleep between 5-15 seconds. 10 seconds
-        # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number
+        # Have all processes sleep between 5-15 seconds. 10 seconds to give time for
+        # the heartbeat to register (if the poll interval is going to be calculated by the number
         # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
         total = 0
-        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
+        total += INITIAL_WAIT unless @config[:poll_interval_average]
        total += (5 * rand)
 
         @sleeper.pop(total)
       rescue Timeout::Error
+      ensure
+        # periodically clean out the `processes` set in Redis which can collect
+        # references to dead processes over time. The process count affects how
+        # often we scan for scheduled jobs.
+        cleanup
       end
     end
   end
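
To make the scaling above concrete, here is a worked sketch. The 5-second value for :average_scheduled_poll_interval is an assumption about the gem's default, and the Sidekiq.options style mirrors the removed pre-6.5 lines; the Poller now reads the same keys from the options hash it is constructed with:

    # Assumed: average_scheduled_poll_interval = 5 and three live processes
    # (scard "processes" => 3), so scaled_poll_interval => 3 * 5 = 15s.
    # Because count < 10, each cycle then sleeps a random value in the ±50%
    # band around that average, roughly 7.5..22.5 seconds.
    #
    # Pinning the average bypasses the scaling and skips the 10s INITIAL_WAIT:
    Sidekiq.options[:poll_interval_average] = 15
    # or tune the per-process scale factor instead:
    Sidekiq.options[:average_scheduled_poll_interval] = 10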
data/lib/sidekiq/testing/inline.rb

@@ -4,7 +4,7 @@ require "sidekiq/testing"
 
 ##
 # The Sidekiq inline infrastructure overrides perform_async so that it
-# actually calls perform instead. This allows workers to be run inline in a
+# actually calls perform instead. This allows jobs to be run inline in a
 # testing environment.
 #
 # This is similar to `Resque.inline = true` functionality.
@@ -15,8 +15,8 @@ require "sidekiq/testing"
 #
 #   $external_variable = 0
 #
-#   class ExternalWorker
-#     include Sidekiq::Worker
+#   class ExternalJob
+#     include Sidekiq::Job
 #
 #     def perform
 #       $external_variable = 1
@@ -24,7 +24,7 @@ require "sidekiq/testing"
 #   end
 #
 #   assert_equal 0, $external_variable
-#   ExternalWorker.perform_async
+#   ExternalJob.perform_async
 #   assert_equal 1, $external_variable
 #
 Sidekiq::Testing.inline!
data/lib/sidekiq/testing.rb

@@ -101,20 +101,20 @@ module Sidekiq
   ##
   # The Queues class is only for testing the fake queue implementation.
   # There are 2 data structures involved in tandem. This is due to the
-  # Rspec syntax of change(QueueWorker.jobs, :size). It keeps a reference
+  # Rspec syntax of change(HardJob.jobs, :size). It keeps a reference
   # to the array. Because the array was dervied from a filter of the total
   # jobs enqueued, it appeared as though the array didn't change.
   #
   # To solve this, we'll keep 2 hashes containing the jobs. One with keys based
-  # on the queue, and another with keys of the worker names, so the array for
-  # QueueWorker.jobs is a straight reference to a real array.
+  # on the queue, and another with keys of the job type, so the array for
+  # HardJob.jobs is a straight reference to a real array.
   #
   # Queue-based hash:
   #
   #   {
   #     "default"=>[
   #       {
-  #         "class"=>"TestTesting::QueueWorker",
+  #         "class"=>"TestTesting::HardJob",
   #         "args"=>[1, 2],
   #         "retry"=>true,
   #         "queue"=>"default",
@@ -124,12 +124,12 @@ module Sidekiq
   #     ]
   #   }
   #
-  # Worker-based hash:
+  # Job-based hash:
   #
   #   {
-  #     "TestTesting::QueueWorker"=>[
+  #     "TestTesting::HardJob"=>[
   #       {
-  #         "class"=>"TestTesting::QueueWorker",
+  #         "class"=>"TestTesting::HardJob",
   #         "args"=>[1, 2],
   #         "retry"=>true,
   #         "queue"=>"default",
@@ -144,14 +144,14 @@ module Sidekiq
   #   require 'sidekiq/testing'
   #
   #   assert_equal 0, Sidekiq::Queues["default"].size
-  #   HardWorker.perform_async(:something)
+  #   HardJob.perform_async(:something)
   #   assert_equal 1, Sidekiq::Queues["default"].size
   #   assert_equal :something, Sidekiq::Queues["default"].first['args'][0]
   #
-  # You can also clear all workers' jobs:
+  # You can also clear all jobs:
   #
   #   assert_equal 0, Sidekiq::Queues["default"].size
-  #   HardWorker.perform_async(:something)
+  #   HardJob.perform_async(:something)
   #   Sidekiq::Queues.clear_all
   #   assert_equal 0, Sidekiq::Queues["default"].size
   #
@@ -170,35 +170,36 @@ module Sidekiq
 
       def push(queue, klass, job)
         jobs_by_queue[queue] << job
-        jobs_by_worker[klass] << job
+        jobs_by_class[klass] << job
       end
 
       def jobs_by_queue
         @jobs_by_queue ||= Hash.new { |hash, key| hash[key] = [] }
       end
 
-      def jobs_by_worker
-        @jobs_by_worker ||= Hash.new { |hash, key| hash[key] = [] }
+      def jobs_by_class
+        @jobs_by_class ||= Hash.new { |hash, key| hash[key] = [] }
       end
+      alias_method :jobs_by_worker, :jobs_by_class
 
       def delete_for(jid, queue, klass)
         jobs_by_queue[queue.to_s].delete_if { |job| job["jid"] == jid }
-        jobs_by_worker[klass].delete_if { |job| job["jid"] == jid }
+        jobs_by_class[klass].delete_if { |job| job["jid"] == jid }
       end
 
       def clear_for(queue, klass)
-        jobs_by_queue[queue].clear
-        jobs_by_worker[klass].clear
+        jobs_by_queue[queue.to_s].clear
+        jobs_by_class[klass].clear
       end
 
       def clear_all
         jobs_by_queue.clear
-        jobs_by_worker.clear
+        jobs_by_class.clear
       end
     end
   end
 
-  module Worker
+  module Job
     ##
     # The Sidekiq testing infrastructure overrides perform_async
     # so that it does not actually touch the network. Instead it
@@ -212,16 +213,16 @@ module Sidekiq
     #
     #   require 'sidekiq/testing'
     #
-    #   assert_equal 0, HardWorker.jobs.size
-    #   HardWorker.perform_async(:something)
-    #   assert_equal 1, HardWorker.jobs.size
-    #   assert_equal :something, HardWorker.jobs[0]['args'][0]
+    #   assert_equal 0, HardJob.jobs.size
+    #   HardJob.perform_async(:something)
+    #   assert_equal 1, HardJob.jobs.size
+    #   assert_equal :something, HardJob.jobs[0]['args'][0]
     #
     #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
     #   MyMailer.delay.send_welcome_email('foo@example.com')
     #   assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
     #
-    # You can also clear and drain all workers' jobs:
+    # You can also clear and drain all job types:
     #
     #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
     #   assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size
@@ -241,14 +242,14 @@ module Sidekiq
     #
     #   RSpec.configure do |config|
     #     config.before(:each) do
-    #       Sidekiq::Worker.clear_all
+    #       Sidekiq::Job.clear_all
     #     end
     #   end
     #
     # or for acceptance testing, i.e. with cucumber:
     #
     #   AfterStep do
-    #     Sidekiq::Worker.drain_all
+    #     Sidekiq::Job.drain_all
     #   end
     #
     #   When I sign up as "foo@example.com"
@@ -262,7 +263,7 @@ module Sidekiq
 
       # Jobs queued for this worker
       def jobs
-        Queues.jobs_by_worker[to_s]
+        Queues.jobs_by_class[to_s]
       end
 
       # Clear all jobs for this worker
@@ -288,11 +289,11 @@ module Sidekiq
       end
 
       def process_job(job)
-        worker = new
-        worker.jid = job["jid"]
-        worker.bid = job["bid"] if worker.respond_to?(:bid=)
-        Sidekiq::Testing.server_middleware.invoke(worker, job, job["queue"]) do
-          execute_job(worker, job["args"])
+        inst = new
+        inst.jid = job["jid"]
+        inst.bid = job["bid"] if inst.respond_to?(:bid=)
+        Sidekiq::Testing.server_middleware.invoke(inst, job, job["queue"]) do
+          execute_job(inst, job["args"])
         end
       end
 
@@ -306,18 +307,18 @@ module Sidekiq
         Queues.jobs_by_queue.values.flatten
       end
 
-      # Clear all queued jobs across all workers
+      # Clear all queued jobs
       def clear_all
         Queues.clear_all
       end
 
-      # Drain all queued jobs across all workers
+      # Drain (execute) all queued jobs
      def drain_all
        while jobs.any?
-          worker_classes = jobs.map { |job| job["class"] }.uniq
+          job_classes = jobs.map { |job| job["class"] }.uniq
 
-          worker_classes.each do |worker_class|
-            Sidekiq::Testing.constantize(worker_class).drain
+          job_classes.each do |job_class|
+            Sidekiq::Testing.constantize(job_class).drain
           end
         end
       end
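
A compact sketch of the fake-queue API after the Worker-to-Job rename, using the HardJob name from the comments above; the class definition here is illustrative, not part of Sidekiq:

    require "sidekiq/testing"
    Sidekiq::Testing.fake!

    class HardJob
      include Sidekiq::Job
      def perform(arg); end
    end

    HardJob.perform_async(123)
    HardJob.jobs.size                         # => 1, backed by Queues.jobs_by_class
    Sidekiq::Queues.jobs_by_worker["HardJob"] # old name still answers via the alias above
    Sidekiq::Job.drain_all                    # runs every queued job through the test middleware
    HardJob.jobs.size                         # => 0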
data/lib/sidekiq/transaction_aware_client.rb

@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+require "securerandom"
+require "sidekiq/client"
+
+module Sidekiq
+  class TransactionAwareClient
+    def initialize(redis_pool)
+      @redis_client = Client.new(redis_pool)
+    end
+
+    def push(item)
+      # pre-allocate the JID so we can return it immediately and
+      # save it to the database as part of the transaction.
+      item["jid"] ||= SecureRandom.hex(12)
+      AfterCommitEverywhere.after_commit { @redis_client.push(item) }
+      item["jid"]
+    end
+
+    ##
+    # We don't provide transactionality for push_bulk because we don't want
+    # to hold potentially hundreds of thousands of job records in memory due to
+    # a long running enqueue process.
+    def push_bulk(items)
+      @redis_client.push_bulk(items)
+    end
+  end
+end
+
+##
+# Use `Sidekiq.transactional_push!` in your sidekiq.rb initializer
+module Sidekiq
+  def self.transactional_push!
+    begin
+      require "after_commit_everywhere"
+    rescue LoadError
+      Sidekiq.logger.error("You need to add after_commit_everywhere to your Gemfile to use Sidekiq's transactional client")
+      raise
+    end
+
+    default_job_options["client_class"] = Sidekiq::TransactionAwareClient
+    Sidekiq::JobUtil::TRANSIENT_ATTRIBUTES << "client_class"
+    true
+  end
+end
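
A sketch of how the new opt-in is meant to be used, assuming a Rails app with after_commit_everywhere in the Gemfile; Order and OrderJob are hypothetical names:

    # config/initializers/sidekiq.rb
    Sidekiq.transactional_push!   # swaps in the client_class shown above

    # Application code (hypothetical model and job):
    ActiveRecord::Base.transaction do
      order = Order.create!(total_cents: 1000)
      OrderJob.perform_async(order.id)   # JID returned now, LPUSH deferred until after COMMIT
    end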
data/lib/sidekiq/version.rb

@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Sidekiq
-  VERSION = "6.4.1"
+  VERSION = "6.5.10"
 end
data/lib/sidekiq/web/action.rb

@@ -15,11 +15,11 @@ module Sidekiq
     end
 
     def halt(res)
-      throw :halt, [res, {"Content-Type" => "text/plain"}, [res.to_s]]
+      throw :halt, [res, {"content-type" => "text/plain"}, [res.to_s]]
     end
 
     def redirect(location)
-      throw :halt, [302, {"Location" => "#{request.base_url}#{location}"}, []]
+      throw :halt, [302, {"location" => "#{request.base_url}#{location}"}, []]
     end
 
     def params
@@ -68,7 +68,7 @@ module Sidekiq
     end
 
     def json(payload)
-      [200, {"Content-Type" => "application/json", "Cache-Control" => "private, no-store"}, [Sidekiq.dump_json(payload)]]
+      [200, {"content-type" => "application/json", "cache-control" => "private, no-store"}, [Sidekiq.dump_json(payload)]]
     end
 
     def initialize(env, block)
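
The response header keys above are now lowercase, which lines up with Rack 3's requirement that response header names be lowercased. The helper still returns an ordinary Rack triplet; illustratively:

    # Shape of the value returned by the json helper above (illustrative only):
    [200,
     {"content-type" => "application/json", "cache-control" => "private, no-store"},
     ['{"ok":true}']]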