sidekiq 6.1.1 → 6.5.7

Potentially problematic release.

Files changed (118)
  1. checksums.yaml +4 -4
  2. data/Changes.md +230 -3
  3. data/LICENSE +3 -3
  4. data/README.md +10 -6
  5. data/bin/sidekiq +3 -3
  6. data/bin/sidekiqload +70 -66
  7. data/bin/sidekiqmon +1 -1
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +335 -146
  13. data/lib/sidekiq/cli.rb +74 -41
  14. data/lib/sidekiq/client.rb +48 -72
  15. data/lib/sidekiq/{util.rb → component.rb} +12 -14
  16. data/lib/sidekiq/delay.rb +3 -1
  17. data/lib/sidekiq/extensions/action_mailer.rb +3 -2
  18. data/lib/sidekiq/extensions/active_record.rb +1 -1
  19. data/lib/sidekiq/extensions/generic_proxy.rb +4 -2
  20. data/lib/sidekiq/fetch.rb +31 -20
  21. data/lib/sidekiq/job.rb +13 -0
  22. data/lib/sidekiq/job_logger.rb +16 -28
  23. data/lib/sidekiq/job_retry.rb +79 -59
  24. data/lib/sidekiq/job_util.rb +71 -0
  25. data/lib/sidekiq/launcher.rb +126 -65
  26. data/lib/sidekiq/logger.rb +11 -20
  27. data/lib/sidekiq/manager.rb +35 -34
  28. data/lib/sidekiq/metrics/deploy.rb +47 -0
  29. data/lib/sidekiq/metrics/query.rb +153 -0
  30. data/lib/sidekiq/metrics/shared.rb +94 -0
  31. data/lib/sidekiq/metrics/tracking.rb +134 -0
  32. data/lib/sidekiq/middleware/chain.rb +88 -42
  33. data/lib/sidekiq/middleware/current_attributes.rb +63 -0
  34. data/lib/sidekiq/middleware/i18n.rb +6 -4
  35. data/lib/sidekiq/middleware/modules.rb +21 -0
  36. data/lib/sidekiq/monitor.rb +1 -1
  37. data/lib/sidekiq/paginator.rb +8 -8
  38. data/lib/sidekiq/processor.rb +47 -41
  39. data/lib/sidekiq/rails.rb +22 -4
  40. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  41. data/lib/sidekiq/redis_connection.rb +84 -55
  42. data/lib/sidekiq/ring_buffer.rb +29 -0
  43. data/lib/sidekiq/scheduled.rb +96 -32
  44. data/lib/sidekiq/testing/inline.rb +4 -4
  45. data/lib/sidekiq/testing.rb +38 -39
  46. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  47. data/lib/sidekiq/version.rb +1 -1
  48. data/lib/sidekiq/web/action.rb +3 -3
  49. data/lib/sidekiq/web/application.rb +38 -16
  50. data/lib/sidekiq/web/csrf_protection.rb +32 -5
  51. data/lib/sidekiq/web/helpers.rb +60 -28
  52. data/lib/sidekiq/web/router.rb +4 -1
  53. data/lib/sidekiq/web.rb +38 -78
  54. data/lib/sidekiq/worker.rb +140 -14
  55. data/lib/sidekiq.rb +114 -31
  56. data/sidekiq.gemspec +12 -4
  57. data/web/assets/images/apple-touch-icon.png +0 -0
  58. data/web/assets/javascripts/application.js +113 -60
  59. data/web/assets/javascripts/chart.min.js +13 -0
  60. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  61. data/web/assets/javascripts/dashboard.js +50 -67
  62. data/web/assets/javascripts/graph.js +16 -0
  63. data/web/assets/javascripts/metrics.js +262 -0
  64. data/web/assets/stylesheets/application-dark.css +61 -51
  65. data/web/assets/stylesheets/application-rtl.css +0 -4
  66. data/web/assets/stylesheets/application.css +84 -243
  67. data/web/locales/ar.yml +8 -2
  68. data/web/locales/el.yml +43 -19
  69. data/web/locales/en.yml +11 -1
  70. data/web/locales/es.yml +18 -2
  71. data/web/locales/fr.yml +8 -1
  72. data/web/locales/ja.yml +10 -0
  73. data/web/locales/lt.yml +1 -1
  74. data/web/locales/pt-br.yml +27 -9
  75. data/web/locales/ru.yml +4 -0
  76. data/web/locales/zh-cn.yml +36 -11
  77. data/web/locales/zh-tw.yml +32 -7
  78. data/web/views/_footer.erb +1 -1
  79. data/web/views/_job_info.erb +1 -1
  80. data/web/views/_nav.erb +1 -1
  81. data/web/views/_poll_link.erb +2 -5
  82. data/web/views/_summary.erb +7 -7
  83. data/web/views/busy.erb +52 -21
  84. data/web/views/dashboard.erb +23 -14
  85. data/web/views/dead.erb +1 -1
  86. data/web/views/layout.erb +2 -1
  87. data/web/views/metrics.erb +69 -0
  88. data/web/views/metrics_for_job.erb +87 -0
  89. data/web/views/morgue.erb +6 -6
  90. data/web/views/queue.erb +15 -11
  91. data/web/views/queues.erb +4 -4
  92. data/web/views/retries.erb +7 -7
  93. data/web/views/retry.erb +1 -1
  94. data/web/views/scheduled.erb +1 -1
  95. metadata +46 -39
  96. data/.circleci/config.yml +0 -71
  97. data/.github/contributing.md +0 -32
  98. data/.github/issue_template.md +0 -11
  99. data/.gitignore +0 -13
  100. data/.standard.yml +0 -20
  101. data/3.0-Upgrade.md +0 -70
  102. data/4.0-Upgrade.md +0 -53
  103. data/5.0-Upgrade.md +0 -56
  104. data/6.0-Upgrade.md +0 -72
  105. data/COMM-LICENSE +0 -97
  106. data/Ent-2.0-Upgrade.md +0 -37
  107. data/Ent-Changes.md +0 -275
  108. data/Gemfile +0 -24
  109. data/Gemfile.lock +0 -208
  110. data/Pro-2.0-Upgrade.md +0 -138
  111. data/Pro-3.0-Upgrade.md +0 -44
  112. data/Pro-4.0-Upgrade.md +0 -35
  113. data/Pro-5.0-Upgrade.md +0 -25
  114. data/Pro-Changes.md +0 -795
  115. data/Rakefile +0 -10
  116. data/code_of_conduct.md +0 -50
  117. data/lib/generators/sidekiq/worker_generator.rb +0 -57
  118. data/lib/sidekiq/exception_handler.rb +0 -27
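
A note on items 8-11 and 117 above: the generator and its templates move from "worker" to "job" naming, in line with the wider Worker→Job rename in this release. A minimal sketch of the kind of class such a job generator produces (the file path and class name are illustrative, not taken from this diff):

    # app/sidekiq/hard_job.rb -- hypothetical output of the sidekiq:job generator
    class HardJob
      include Sidekiq::Job   # Sidekiq::Job is the newer alias for Sidekiq::Worker

      def perform(*args)
        # do the work here
      end
    end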
data/lib/sidekiq/redis_connection.rb
@@ -5,8 +5,79 @@ require "redis"
 require "uri"

 module Sidekiq
-  class RedisConnection
+  module RedisConnection
+    class RedisAdapter
+      BaseError = Redis::BaseError
+      CommandError = Redis::CommandError
+
+      def initialize(options)
+        warn("Usage of the 'redis' gem within Sidekiq itself is deprecated, Sidekiq 7.0 will only use the new, simpler 'redis-client' gem", caller) if ENV["SIDEKIQ_REDIS_CLIENT"] == "1"
+        @options = options
+      end
+
+      def new_client
+        namespace = @options[:namespace]
+
+        client = Redis.new client_opts(@options)
+        if namespace
+          begin
+            require "redis/namespace"
+            Redis::Namespace.new(namespace, redis: client)
+          rescue LoadError
+            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile." \
+              "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
+            exit(-127)
+          end
+        else
+          client
+        end
+      end
+
+      private
+
+      def client_opts(options)
+        opts = options.dup
+        if opts[:namespace]
+          opts.delete(:namespace)
+        end
+
+        if opts[:network_timeout]
+          opts[:timeout] = opts[:network_timeout]
+          opts.delete(:network_timeout)
+        end
+
+        # Issue #3303, redis-rb will silently retry an operation.
+        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
+        # is performed twice but I believe this is much, much rarer
+        # than the reconnect silently fixing a problem; we keep it
+        # on by default.
+        opts[:reconnect_attempts] ||= 1
+
+        opts
+      end
+    end
+
+    @adapter = RedisAdapter
+
     class << self
+      attr_reader :adapter
+
+      # RedisConnection.adapter = :redis
+      # RedisConnection.adapter = :redis_client
+      def adapter=(adapter)
+        raise "no" if adapter == self
+        result = case adapter
+        when :redis
+          RedisAdapter
+        when Class
+          adapter
+        else
+          require "sidekiq/#{adapter}_adapter"
+          nil
+        end
+        @adapter = result if result
+      end
+
       def create(options = {})
         symbolized_options = options.transform_keys(&:to_sym)

@@ -19,26 +90,27 @@ module Sidekiq
         elsif Sidekiq.server?
           # Give ourselves plenty of connections. pool is lazy
           # so we won't create them until we need them.
-          Sidekiq.options[:concurrency] + 5
+          Sidekiq[:concurrency] + 5
         elsif ENV["RAILS_MAX_THREADS"]
           Integer(ENV["RAILS_MAX_THREADS"])
         else
           5
         end

-        verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?
+        verify_sizing(size, Sidekiq[:concurrency]) if Sidekiq.server?

         pool_timeout = symbolized_options[:pool_timeout] || 1
         log_info(symbolized_options)

+        redis_config = adapter.new(symbolized_options)
         ConnectionPool.new(timeout: pool_timeout, size: size) do
-          build_client(symbolized_options)
+          redis_config.new_client
         end
       end

       private

-      # Sidekiq needs a lot of concurrent Redis connections.
+      # Sidekiq needs many concurrent Redis connections.
       #
       # We need a connection for each Processor.
       # We need a connection for Pro's real-time change listener
@@ -47,59 +119,16 @@ module Sidekiq
       # - enterprise's leader election
       # - enterprise's cron support
       def verify_sizing(size, concurrency)
-        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
-      end
-
-      def build_client(options)
-        namespace = options[:namespace]
-
-        client = Redis.new client_opts(options)
-        if namespace
-          begin
-            require "redis/namespace"
-            Redis::Namespace.new(namespace, redis: client)
-          rescue LoadError
-            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile." \
-              "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
-            exit(-127)
-          end
-        else
-          client
-        end
-      end
-
-      def client_opts(options)
-        opts = options.dup
-        if opts[:namespace]
-          opts.delete(:namespace)
-        end
-
-        if opts[:network_timeout]
-          opts[:timeout] = opts[:network_timeout]
-          opts.delete(:network_timeout)
-        end
-
-        opts[:driver] ||= Redis::Connection.drivers.last || "ruby"
-
-        # Issue #3303, redis-rb will silently retry an operation.
-        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
-        # is performed twice but I believe this is much, much rarer
-        # than the reconnect silently fixing a problem; we keep it
-        # on by default.
-        opts[:reconnect_attempts] ||= 1
-
-        opts
+        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
       end

       def log_info(options)
         redacted = "REDACTED"

-        # deep clone so we can muck with these options all we want
-        #
-        # exclude SSL params from dump-and-load because some information isn't
-        # safely dumpable in current Rubies
-        keys = options.keys
-        keys.delete(:ssl_params)
+        # Deep clone so we can muck with these options all we want and exclude
+        # params from dump-and-load that may contain objects that Marshal is
+        # unable to safely dump.
+        keys = options.keys - [:logger, :ssl_params]
         scrubbed_options = Marshal.load(Marshal.dump(options.slice(*keys)))
         if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
           uri.password = redacted
@@ -112,9 +141,9 @@ module Sidekiq
           sentinel[:password] = redacted if sentinel[:password]
         end
         if Sidekiq.server?
-          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with redis options #{scrubbed_options}")
+          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with #{adapter.name} options #{scrubbed_options}")
         else
-          Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}")
+          Sidekiq.logger.debug("#{Sidekiq::NAME} client with #{adapter.name} options #{scrubbed_options}")
         end
       end

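The redis_connection.rb hunks above extract client construction into a swappable adapter object. A hedged sketch of how the new pieces fit together, based only on the code shown here (the :redis_client branch assumes the sidekiq/redis_client_adapter.rb file added in this release registers its own class when required):

    require "sidekiq"

    Sidekiq::RedisConnection.adapter
    # => Sidekiq::RedisConnection::RedisAdapter (wraps the classic redis gem)

    # Opting in to the redis-client based adapter; the required file is expected
    # to call RedisConnection.adapter= with its own adapter class.
    # Sidekiq::RedisConnection.adapter = :redis_client

    # create still returns a ConnectionPool; each checkout now calls adapter#new_client.
    pool = Sidekiq::RedisConnection.create(url: "redis://localhost:6379/0", size: 5)
    pool.with { |conn| conn.ping }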
data/lib/sidekiq/ring_buffer.rb
@@ -0,0 +1,29 @@
+require "forwardable"
+
+module Sidekiq
+  class RingBuffer
+    include Enumerable
+    extend Forwardable
+    def_delegators :@buf, :[], :each, :size
+
+    def initialize(size, default = 0)
+      @size = size
+      @buf = Array.new(size, default)
+      @index = 0
+    end
+
+    def <<(element)
+      @buf[@index % @size] = element
+      @index += 1
+      element
+    end
+
+    def buffer
+      @buf
+    end
+
+    def reset(default = 0)
+      @buf.fill(default)
+    end
+  end
+end
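
RingBuffer is a small fixed-size circular buffer added in this release. A quick usage sketch of the class exactly as defined above:

    require "sidekiq/ring_buffer"

    buf = Sidekiq::RingBuffer.new(3)   # three slots, each defaulting to 0
    buf << 10
    buf << 20
    buf << 30
    buf << 40            # the index wraps via modulo, overwriting the oldest slot
    buf.to_a             # => [40, 20, 30] (Enumerable, via the delegated #each)
    buf.reset            # fills every slot back to the default value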
data/lib/sidekiq/scheduled.rb
@@ -1,37 +1,63 @@
 # frozen_string_literal: true

 require "sidekiq"
-require "sidekiq/util"
-require "sidekiq/api"
+require "sidekiq/component"

 module Sidekiq
   module Scheduled
     SETS = %w[retry schedule]

     class Enq
-      def enqueue_jobs(now = Time.now.to_f.to_s, sorted_sets = SETS)
+      LUA_ZPOPBYSCORE = <<~LUA
+        local key, now = KEYS[1], ARGV[1]
+        local jobs = redis.call("zrangebyscore", key, "-inf", now, "limit", 0, 1)
+        if jobs[1] then
+          redis.call("zrem", key, jobs[1])
+          return jobs[1]
+        end
+      LUA
+
+      def initialize
+        @done = false
+        @lua_zpopbyscore_sha = nil
+      end
+
+      def enqueue_jobs(sorted_sets = SETS)
         # A job's "score" in Redis is the time at which it should be processed.
         # Just check Redis for the set of jobs with a timestamp before now.
         Sidekiq.redis do |conn|
           sorted_sets.each do |sorted_set|
-            # Get next items in the queue with scores (time to execute) <= now.
-            until (jobs = conn.zrangebyscore(sorted_set, "-inf", now, limit: [0, 100])).empty?
-              # We need to go through the list one at a time to reduce the risk of something
-              # going wrong between the time jobs are popped from the scheduled queue and when
-              # they are pushed onto a work queue and losing the jobs.
-              jobs.each do |job|
-                # Pop item off the queue and add it to the work queue. If the job can't be popped from
-                # the queue, it's because another process already popped it so we can move on to the
-                # next one.
-                if conn.zrem(sorted_set, job)
-                  Sidekiq::Client.push(Sidekiq.load_json(job))
-                  Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
-                end
-              end
+            # Get next item in the queue with score (time to execute) <= now.
+            # We need to go through the list one at a time to reduce the risk of something
+            # going wrong between the time jobs are popped from the scheduled queue and when
+            # they are pushed onto a work queue and losing the jobs.
+            while !@done && (job = zpopbyscore(conn, keys: [sorted_set], argv: [Time.now.to_f.to_s]))
+              Sidekiq::Client.push(Sidekiq.load_json(job))
+              Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
             end
           end
         end
       end
+
+      def terminate
+        @done = true
+      end
+
+      private
+
+      def zpopbyscore(conn, keys: nil, argv: nil)
+        if @lua_zpopbyscore_sha.nil?
+          raw_conn = conn.respond_to?(:redis) ? conn.redis : conn
+          @lua_zpopbyscore_sha = raw_conn.script(:load, LUA_ZPOPBYSCORE)
+        end
+
+        conn.evalsha(@lua_zpopbyscore_sha, keys, argv)
+      rescue RedisConnection.adapter::CommandError => e
+        raise unless e.message.start_with?("NOSCRIPT")
+
+        @lua_zpopbyscore_sha = nil
+        retry
+      end
     end

     ##
@@ -40,20 +66,24 @@ module Sidekiq
     # just pops the job back onto its original queue so the
     # workers can pick it up like any other job.
     class Poller
-      include Util
+      include Sidekiq::Component

       INITIAL_WAIT = 10

-      def initialize
-        @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
+      def initialize(options)
+        @config = options
+        @enq = (options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
         @sleeper = ConnectionPool::TimedStack.new
         @done = false
         @thread = nil
+        @count_calls = 0
       end

       # Shut down this instance, will pause until the thread is dead.
       def terminate
         @done = true
+        @enq.terminate if @enq.respond_to?(:terminate)
+
         if @thread
           t = @thread
           @thread = nil
@@ -70,7 +100,7 @@ module Sidekiq
             enqueue
             wait
           end
-          Sidekiq.logger.info("Scheduler exiting...")
+          logger.info("Scheduler exiting...")
         }
       end

@@ -117,13 +147,16 @@ module Sidekiq
         # As we run more processes, the scheduling interval average will approach an even spread
         # between 0 and poll interval so we don't need this artifical boost.
         #
-        if process_count < 10
+        count = process_count
+        interval = poll_interval_average(count)
+
+        if count < 10
           # For small clusters, calculate a random interval that is ±50% the desired average.
-          poll_interval_average * rand + poll_interval_average.to_f / 2
+          interval * rand + interval.to_f / 2
         else
           # With 10+ processes, we should have enough randomness to get decent polling
           # across the entire timespan
-          poll_interval_average * rand
+          interval * rand
         end
       end

@@ -140,33 +173,64 @@ module Sidekiq
       # the same time: the thundering herd problem.
       #
       # We only do this if poll_interval_average is unset (the default).
-      def poll_interval_average
-        Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
+      def poll_interval_average(count)
+        @config[:poll_interval_average] || scaled_poll_interval(count)
       end

       # Calculates an average poll interval based on the number of known Sidekiq processes.
       # This minimizes a single point of failure by dispersing check-ins but without taxing
       # Redis if you run many Sidekiq processes.
-      def scaled_poll_interval
-        process_count * Sidekiq.options[:average_scheduled_poll_interval]
+      def scaled_poll_interval(process_count)
+        process_count * @config[:average_scheduled_poll_interval]
       end

       def process_count
-        pcount = Sidekiq::ProcessSet.new.size
+        pcount = Sidekiq.redis { |conn| conn.scard("processes") }
         pcount = 1 if pcount == 0
         pcount
       end

+      # A copy of Sidekiq::ProcessSet#cleanup because server
+      # should never depend on sidekiq/api.
+      def cleanup
+        # dont run cleanup more than once per minute
+        return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }
+
+        count = 0
+        Sidekiq.redis do |conn|
+          procs = conn.sscan_each("processes").to_a
+          heartbeats = conn.pipelined { |pipeline|
+            procs.each do |key|
+              pipeline.hget(key, "info")
+            end
+          }
+
+          # the hash named key has an expiry of 60 seconds.
+          # if it's not found, that means the process has not reported
+          # in to Redis and probably died.
+          to_prune = procs.select.with_index { |proc, i|
+            heartbeats[i].nil?
+          }
+          count = conn.srem("processes", to_prune) unless to_prune.empty?
+        end
+        count
+      end
+
       def initial_wait
-        # Have all processes sleep between 5-15 seconds. 10 seconds
-        # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number
+        # Have all processes sleep between 5-15 seconds. 10 seconds to give time for
+        # the heartbeat to register (if the poll interval is going to be calculated by the number
        # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
         total = 0
-        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
+        total += INITIAL_WAIT unless @config[:poll_interval_average]
         total += (5 * rand)

         @sleeper.pop(total)
       rescue Timeout::Error
+      ensure
+        # periodically clean out the `processes` set in Redis which can collect
+        # references to dead processes over time. The process count affects how
+        # often we scan for scheduled jobs.
+        cleanup
       end
     end
   end
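
The scheduler now pops one due job at a time with a server-side Lua script (ZRANGEBYSCORE plus ZREM executed atomically) instead of reading a batch and deleting entries in separate round trips, and the Poller receives its options hash explicitly. A small console-style sketch of driving the new Enq by hand; MyJob is a placeholder job class:

    MyJob.perform_in(5)          # lands in the "schedule" sorted set
    sleep 6

    enq = Sidekiq::Scheduled::Enq.new
    enq.enqueue_jobs             # atomically pops due jobs and pushes them onto their queues
    enq.terminate                # flips @done so an in-flight loop exits early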
data/lib/sidekiq/testing/inline.rb
@@ -4,7 +4,7 @@ require "sidekiq/testing"

 ##
 # The Sidekiq inline infrastructure overrides perform_async so that it
-# actually calls perform instead. This allows workers to be run inline in a
+# actually calls perform instead. This allows jobs to be run inline in a
 # testing environment.
 #
 # This is similar to `Resque.inline = true` functionality.
@@ -15,8 +15,8 @@ require "sidekiq/testing"
 #
 #   $external_variable = 0
 #
-#   class ExternalWorker
-#     include Sidekiq::Worker
+#   class ExternalJob
+#     include Sidekiq::Job
 #
 #     def perform
 #       $external_variable = 1
@@ -24,7 +24,7 @@ require "sidekiq/testing"
 #     end
 #   end
 #
 #   assert_equal 0, $external_variable
-#   ExternalWorker.perform_async
+#   ExternalJob.perform_async
 #   assert_equal 1, $external_variable
 #
 Sidekiq::Testing.inline!

data/lib/sidekiq/testing.rb
@@ -101,20 +101,20 @@ module Sidekiq
   ##
   # The Queues class is only for testing the fake queue implementation.
   # There are 2 data structures involved in tandem. This is due to the
-  # Rspec syntax of change(QueueWorker.jobs, :size). It keeps a reference
+  # Rspec syntax of change(HardJob.jobs, :size). It keeps a reference
   # to the array. Because the array was dervied from a filter of the total
   # jobs enqueued, it appeared as though the array didn't change.
   #
   # To solve this, we'll keep 2 hashes containing the jobs. One with keys based
-  # on the queue, and another with keys of the worker names, so the array for
-  # QueueWorker.jobs is a straight reference to a real array.
+  # on the queue, and another with keys of the job type, so the array for
+  # HardJob.jobs is a straight reference to a real array.
   #
   # Queue-based hash:
   #
   #   {
   #     "default"=>[
   #       {
-  #         "class"=>"TestTesting::QueueWorker",
+  #         "class"=>"TestTesting::HardJob",
   #         "args"=>[1, 2],
   #         "retry"=>true,
   #         "queue"=>"default",
@@ -124,12 +124,12 @@ module Sidekiq
   #     ]
   #   }
   #
-  # Worker-based hash:
+  # Job-based hash:
   #
   #   {
-  #     "TestTesting::QueueWorker"=>[
+  #     "TestTesting::HardJob"=>[
   #       {
-  #         "class"=>"TestTesting::QueueWorker",
+  #         "class"=>"TestTesting::HardJob",
   #         "args"=>[1, 2],
   #         "retry"=>true,
   #         "queue"=>"default",
@@ -144,14 +144,14 @@ module Sidekiq
   #   require 'sidekiq/testing'
   #
   #   assert_equal 0, Sidekiq::Queues["default"].size
-  #   HardWorker.perform_async(:something)
+  #   HardJob.perform_async(:something)
   #   assert_equal 1, Sidekiq::Queues["default"].size
   #   assert_equal :something, Sidekiq::Queues["default"].first['args'][0]
   #
-  # You can also clear all workers' jobs:
+  # You can also clear all jobs:
   #
   #   assert_equal 0, Sidekiq::Queues["default"].size
-  #   HardWorker.perform_async(:something)
+  #   HardJob.perform_async(:something)
   #   Sidekiq::Queues.clear_all
   #   assert_equal 0, Sidekiq::Queues["default"].size
   #
@@ -170,35 +170,36 @@ module Sidekiq

       def push(queue, klass, job)
         jobs_by_queue[queue] << job
-        jobs_by_worker[klass] << job
+        jobs_by_class[klass] << job
       end

       def jobs_by_queue
         @jobs_by_queue ||= Hash.new { |hash, key| hash[key] = [] }
       end

-      def jobs_by_worker
-        @jobs_by_worker ||= Hash.new { |hash, key| hash[key] = [] }
+      def jobs_by_class
+        @jobs_by_class ||= Hash.new { |hash, key| hash[key] = [] }
       end
+      alias_method :jobs_by_worker, :jobs_by_class

       def delete_for(jid, queue, klass)
         jobs_by_queue[queue.to_s].delete_if { |job| job["jid"] == jid }
-        jobs_by_worker[klass].delete_if { |job| job["jid"] == jid }
+        jobs_by_class[klass].delete_if { |job| job["jid"] == jid }
       end

       def clear_for(queue, klass)
-        jobs_by_queue[queue].clear
-        jobs_by_worker[klass].clear
+        jobs_by_queue[queue.to_s].clear
+        jobs_by_class[klass].clear
       end

       def clear_all
         jobs_by_queue.clear
-        jobs_by_worker.clear
+        jobs_by_class.clear
       end
     end
   end

-  module Worker
+  module Job
     ##
     # The Sidekiq testing infrastructure overrides perform_async
     # so that it does not actually touch the network. Instead it
@@ -212,16 +213,16 @@ module Sidekiq
     #
     #   require 'sidekiq/testing'
     #
-    #   assert_equal 0, HardWorker.jobs.size
-    #   HardWorker.perform_async(:something)
-    #   assert_equal 1, HardWorker.jobs.size
-    #   assert_equal :something, HardWorker.jobs[0]['args'][0]
+    #   assert_equal 0, HardJob.jobs.size
+    #   HardJob.perform_async(:something)
+    #   assert_equal 1, HardJob.jobs.size
+    #   assert_equal :something, HardJob.jobs[0]['args'][0]
     #
     #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
     #   MyMailer.delay.send_welcome_email('foo@example.com')
     #   assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
     #
-    # You can also clear and drain all workers' jobs:
+    # You can also clear and drain all job types:
     #
     #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
     #   assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size
@@ -241,14 +242,14 @@ module Sidekiq
    #
     #   RSpec.configure do |config|
     #     config.before(:each) do
-    #       Sidekiq::Worker.clear_all
+    #       Sidekiq::Job.clear_all
     #     end
     #   end
     #
     # or for acceptance testing, i.e. with cucumber:
     #
     #   AfterStep do
-    #     Sidekiq::Worker.drain_all
+    #     Sidekiq::Job.drain_all
     #   end
     #
     #   When I sign up as "foo@example.com"
@@ -262,7 +263,7 @@ module Sidekiq

       # Jobs queued for this worker
       def jobs
-        Queues.jobs_by_worker[to_s]
+        Queues.jobs_by_class[to_s]
       end

       # Clear all jobs for this worker
@@ -288,11 +289,11 @@ module Sidekiq
       end

       def process_job(job)
-        worker = new
-        worker.jid = job["jid"]
-        worker.bid = job["bid"] if worker.respond_to?(:bid=)
-        Sidekiq::Testing.server_middleware.invoke(worker, job, job["queue"]) do
-          execute_job(worker, job["args"])
+        inst = new
+        inst.jid = job["jid"]
+        inst.bid = job["bid"] if inst.respond_to?(:bid=)
+        Sidekiq::Testing.server_middleware.invoke(inst, job, job["queue"]) do
+          execute_job(inst, job["args"])
         end
       end

@@ -306,18 +307,18 @@ module Sidekiq
         Queues.jobs_by_queue.values.flatten
       end

-      # Clear all queued jobs across all workers
+      # Clear all queued jobs
       def clear_all
         Queues.clear_all
       end

-      # Drain all queued jobs across all workers
+      # Drain (execute) all queued jobs
       def drain_all
         while jobs.any?
-          worker_classes = jobs.map { |job| job["class"] }.uniq
+          job_classes = jobs.map { |job| job["class"] }.uniq

-          worker_classes.each do |worker_class|
-            Sidekiq::Testing.constantize(worker_class).drain
+          job_classes.each do |job_class|
+            Sidekiq::Testing.constantize(job_class).drain
           end
         end
       end
@@ -338,7 +339,5 @@ module Sidekiq
 end

 if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test? && !$TESTING
-  puts("**************************************************")
-  puts("⛔️ WARNING: Sidekiq testing API enabled, but this is not the test environment. Your jobs will not go to Redis.")
-  puts("**************************************************")
+  warn("⛔️ WARNING: Sidekiq testing API enabled, but this is not the test environment. Your jobs will not go to Redis.", uplevel: 1)
 end
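
Taken together, the testing.rb changes are a rename (jobs_by_worker becomes jobs_by_class, with an alias kept for backwards compatibility) plus a quieter warning outside the test environment. A short fake-mode sketch in the spirit of the updated comments; HardJob is a placeholder job class:

    require "sidekiq/testing"
    Sidekiq::Testing.fake!

    class HardJob
      include Sidekiq::Job
      def perform(msg)
        puts msg
      end
    end

    HardJob.perform_async("hello")
    HardJob.jobs.size               # => 1, backed by Queues.jobs_by_class["HardJob"]
    Sidekiq::Queues.jobs_by_worker  # still answers, aliased to jobs_by_class
    HardJob.drain                   # executes the queued job through the testing middleware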