sidekiq 6.2.2 → 7.1.2

Files changed (120)
  1. checksums.yaml +4 -4
  2. data/Changes.md +299 -11
  3. data/LICENSE.txt +9 -0
  4. data/README.md +45 -32
  5. data/bin/sidekiq +4 -9
  6. data/bin/sidekiqload +207 -117
  7. data/bin/sidekiqmon +4 -1
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +334 -190
  13. data/lib/sidekiq/capsule.rb +127 -0
  14. data/lib/sidekiq/cli.rb +95 -81
  15. data/lib/sidekiq/client.rb +102 -96
  16. data/lib/sidekiq/{util.rb → component.rb} +14 -41
  17. data/lib/sidekiq/config.rb +278 -0
  18. data/lib/sidekiq/deploy.rb +62 -0
  19. data/lib/sidekiq/embedded.rb +61 -0
  20. data/lib/sidekiq/fetch.rb +26 -26
  21. data/lib/sidekiq/job.rb +371 -5
  22. data/lib/sidekiq/job_logger.rb +16 -28
  23. data/lib/sidekiq/job_retry.rb +85 -59
  24. data/lib/sidekiq/job_util.rb +105 -0
  25. data/lib/sidekiq/launcher.rb +106 -94
  26. data/lib/sidekiq/logger.rb +9 -44
  27. data/lib/sidekiq/manager.rb +40 -41
  28. data/lib/sidekiq/metrics/query.rb +153 -0
  29. data/lib/sidekiq/metrics/shared.rb +95 -0
  30. data/lib/sidekiq/metrics/tracking.rb +136 -0
  31. data/lib/sidekiq/middleware/chain.rb +96 -51
  32. data/lib/sidekiq/middleware/current_attributes.rb +95 -0
  33. data/lib/sidekiq/middleware/i18n.rb +6 -4
  34. data/lib/sidekiq/middleware/modules.rb +21 -0
  35. data/lib/sidekiq/monitor.rb +17 -4
  36. data/lib/sidekiq/paginator.rb +17 -9
  37. data/lib/sidekiq/processor.rb +60 -60
  38. data/lib/sidekiq/rails.rb +29 -6
  39. data/lib/sidekiq/redis_client_adapter.rb +96 -0
  40. data/lib/sidekiq/redis_connection.rb +17 -88
  41. data/lib/sidekiq/ring_buffer.rb +29 -0
  42. data/lib/sidekiq/scheduled.rb +101 -44
  43. data/lib/sidekiq/testing/inline.rb +4 -4
  44. data/lib/sidekiq/testing.rb +41 -68
  45. data/lib/sidekiq/transaction_aware_client.rb +44 -0
  46. data/lib/sidekiq/version.rb +2 -1
  47. data/lib/sidekiq/web/action.rb +3 -3
  48. data/lib/sidekiq/web/application.rb +47 -13
  49. data/lib/sidekiq/web/csrf_protection.rb +3 -3
  50. data/lib/sidekiq/web/helpers.rb +36 -33
  51. data/lib/sidekiq/web.rb +10 -17
  52. data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
  53. data/lib/sidekiq.rb +86 -201
  54. data/sidekiq.gemspec +12 -10
  55. data/web/assets/javascripts/application.js +131 -60
  56. data/web/assets/javascripts/base-charts.js +106 -0
  57. data/web/assets/javascripts/chart.min.js +13 -0
  58. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  59. data/web/assets/javascripts/dashboard-charts.js +166 -0
  60. data/web/assets/javascripts/dashboard.js +36 -273
  61. data/web/assets/javascripts/metrics.js +264 -0
  62. data/web/assets/stylesheets/application-dark.css +23 -23
  63. data/web/assets/stylesheets/application-rtl.css +2 -95
  64. data/web/assets/stylesheets/application.css +73 -402
  65. data/web/locales/ar.yml +70 -70
  66. data/web/locales/cs.yml +62 -62
  67. data/web/locales/da.yml +60 -53
  68. data/web/locales/de.yml +65 -65
  69. data/web/locales/el.yml +43 -24
  70. data/web/locales/en.yml +82 -69
  71. data/web/locales/es.yml +68 -68
  72. data/web/locales/fa.yml +65 -65
  73. data/web/locales/fr.yml +81 -67
  74. data/web/locales/gd.yml +99 -0
  75. data/web/locales/he.yml +65 -64
  76. data/web/locales/hi.yml +59 -59
  77. data/web/locales/it.yml +53 -53
  78. data/web/locales/ja.yml +73 -68
  79. data/web/locales/ko.yml +52 -52
  80. data/web/locales/lt.yml +66 -66
  81. data/web/locales/nb.yml +61 -61
  82. data/web/locales/nl.yml +52 -52
  83. data/web/locales/pl.yml +45 -45
  84. data/web/locales/pt-br.yml +63 -55
  85. data/web/locales/pt.yml +51 -51
  86. data/web/locales/ru.yml +67 -66
  87. data/web/locales/sv.yml +53 -53
  88. data/web/locales/ta.yml +60 -60
  89. data/web/locales/uk.yml +62 -61
  90. data/web/locales/ur.yml +64 -64
  91. data/web/locales/vi.yml +67 -67
  92. data/web/locales/zh-cn.yml +43 -16
  93. data/web/locales/zh-tw.yml +42 -8
  94. data/web/views/_footer.erb +6 -3
  95. data/web/views/_job_info.erb +18 -2
  96. data/web/views/_metrics_period_select.erb +12 -0
  97. data/web/views/_nav.erb +1 -1
  98. data/web/views/_paging.erb +2 -0
  99. data/web/views/_poll_link.erb +3 -6
  100. data/web/views/_summary.erb +7 -7
  101. data/web/views/busy.erb +44 -28
  102. data/web/views/dashboard.erb +44 -12
  103. data/web/views/layout.erb +1 -1
  104. data/web/views/metrics.erb +82 -0
  105. data/web/views/metrics_for_job.erb +68 -0
  106. data/web/views/morgue.erb +5 -9
  107. data/web/views/queue.erb +24 -24
  108. data/web/views/queues.erb +4 -2
  109. data/web/views/retries.erb +5 -9
  110. data/web/views/scheduled.erb +12 -13
  111. metadata +62 -31
  112. data/LICENSE +0 -9
  113. data/lib/generators/sidekiq/worker_generator.rb +0 -57
  114. data/lib/sidekiq/delay.rb +0 -41
  115. data/lib/sidekiq/exception_handler.rb +0 -27
  116. data/lib/sidekiq/extensions/action_mailer.rb +0 -48
  117. data/lib/sidekiq/extensions/active_record.rb +0 -43
  118. data/lib/sidekiq/extensions/class_methods.rb +0 -43
  119. data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
  120. data/lib/sidekiq/worker.rb +0 -244

data/lib/sidekiq/redis_connection.rb
@@ -1,105 +1,38 @@
 # frozen_string_literal: true

 require "connection_pool"
-require "redis"
 require "uri"
+require "sidekiq/redis_client_adapter"

 module Sidekiq
-  class RedisConnection
+  module RedisConnection
     class << self
       def create(options = {})
         symbolized_options = options.transform_keys(&:to_sym)
+        symbolized_options[:url] ||= determine_redis_provider

-        if !symbolized_options[:url] && (u = determine_redis_provider)
-          symbolized_options[:url] = u
-        end
-
-        size = if symbolized_options[:size]
-          symbolized_options[:size]
-        elsif Sidekiq.server?
-          # Give ourselves plenty of connections. pool is lazy
-          # so we won't create them until we need them.
-          Sidekiq.options[:concurrency] + 5
-        elsif ENV["RAILS_MAX_THREADS"]
-          Integer(ENV["RAILS_MAX_THREADS"])
-        else
-          5
-        end
+        logger = symbolized_options.delete(:logger)
+        logger&.info { "Sidekiq #{Sidekiq::VERSION} connecting to Redis with options #{scrub(symbolized_options)}" }

-        verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?
+        size = symbolized_options.delete(:size) || 5
+        pool_timeout = symbolized_options.delete(:pool_timeout) || 1
+        pool_name = symbolized_options.delete(:pool_name)

-        pool_timeout = symbolized_options[:pool_timeout] || 1
-        log_info(symbolized_options)
-
-        ConnectionPool.new(timeout: pool_timeout, size: size) do
-          build_client(symbolized_options)
+        redis_config = Sidekiq::RedisClientAdapter.new(symbolized_options)
+        ConnectionPool.new(timeout: pool_timeout, size: size, name: pool_name) do
+          redis_config.new_client
         end
       end

       private

-      # Sidekiq needs a lot of concurrent Redis connections.
-      #
-      # We need a connection for each Processor.
-      # We need a connection for Pro's real-time change listener
-      # We need a connection to various features to call Redis every few seconds:
-      #   - the process heartbeat.
-      #   - enterprise's leader election
-      #   - enterprise's cron support
-      def verify_sizing(size, concurrency)
-        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
-      end
-
-      def build_client(options)
-        namespace = options[:namespace]
-
-        client = Redis.new client_opts(options)
-        if namespace
-          begin
-            require "redis/namespace"
-            Redis::Namespace.new(namespace, redis: client)
-          rescue LoadError
-            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile." \
-                                 "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
-            exit(-127)
-          end
-        else
-          client
-        end
-      end
-
-      def client_opts(options)
-        opts = options.dup
-        if opts[:namespace]
-          opts.delete(:namespace)
-        end
-
-        if opts[:network_timeout]
-          opts[:timeout] = opts[:network_timeout]
-          opts.delete(:network_timeout)
-        end
-
-        opts[:driver] ||= Redis::Connection.drivers.last || "ruby"
-
-        # Issue #3303, redis-rb will silently retry an operation.
-        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
-        # is performed twice but I believe this is much, much rarer
-        # than the reconnect silently fixing a problem; we keep it
-        # on by default.
-        opts[:reconnect_attempts] ||= 1
-
-        opts
-      end
-
-      def log_info(options)
+      def scrub(options)
         redacted = "REDACTED"

-        # deep clone so we can muck with these options all we want
-        #
-        # exclude SSL params from dump-and-load because some information isn't
-        # safely dumpable in current Rubies
-        keys = options.keys
-        keys.delete(:ssl_params)
+        # Deep clone so we can muck with these options all we want and exclude
+        # params from dump-and-load that may contain objects that Marshal is
+        # unable to safely dump.
+        keys = options.keys - [:logger, :ssl_params]
         scrubbed_options = Marshal.load(Marshal.dump(options.slice(*keys)))
         if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
           uri.password = redacted
@@ -111,11 +44,7 @@ module Sidekiq
         scrubbed_options[:sentinels]&.each do |sentinel|
           sentinel[:password] = redacted if sentinel[:password]
         end
-        if Sidekiq.server?
-          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with redis options #{scrubbed_options}")
-        else
-          Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}")
-        end
+        scrubbed_options
       end

       def determine_redis_provider
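
Note: the rewritten create above no longer sizes the pool from concurrency or builds redis-rb clients; it hands the remaining options to Sidekiq::RedisClientAdapter (redis-client) and wraps the result in a ConnectionPool. A minimal usage sketch, assuming only the option names handled in the diff (:url, :size, :pool_timeout, :pool_name, :logger); the URL and values below are illustrative:

require "logger"
require "sidekiq"

pool = Sidekiq::RedisConnection.create(
  url: "redis://localhost:6379/0",   # falls back to determine_redis_provider when omitted
  size: 10,                          # ConnectionPool size, defaults to 5 in the new code
  pool_timeout: 1,                   # seconds to wait for a free connection
  logger: Logger.new($stdout)        # logged once at startup, password scrubbed by the scrub helper
)

pool.with do |conn|
  conn.call("PING")                  # redis-client style API; redis-rb is no longer required
end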

data/lib/sidekiq/ring_buffer.rb (new file)
@@ -0,0 +1,29 @@
+require "forwardable"
+
+module Sidekiq
+  class RingBuffer
+    include Enumerable
+    extend Forwardable
+    def_delegators :@buf, :[], :each, :size
+
+    def initialize(size, default = 0)
+      @size = size
+      @buf = Array.new(size, default)
+      @index = 0
+    end
+
+    def <<(element)
+      @buf[@index % @size] = element
+      @index += 1
+      element
+    end
+
+    def buffer
+      @buf
+    end
+
+    def reset(default = 0)
+      @buf.fill(default)
+    end
+  end
+end
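
Note: RingBuffer is a new, small fixed-size buffer: once full, each insert overwrites the oldest slot. A quick sketch based only on the API shown above:

require "sidekiq/ring_buffer"

buf = Sidekiq::RingBuffer.new(3)   # three slots, default value 0
buf << 1
buf << 2
buf << 3
buf << 4          # the fourth insert wraps to slot 0, overwriting the oldest element
buf.to_a          # => [4, 2, 3]  (Enumerable via def_delegators :@buf, :each)
buf[1]            # => 2
buf.reset         # refill every slot with the default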

data/lib/sidekiq/scheduled.rb
@@ -1,37 +1,66 @@
 # frozen_string_literal: true

 require "sidekiq"
-require "sidekiq/util"
-require "sidekiq/api"
+require "sidekiq/component"

 module Sidekiq
   module Scheduled
     SETS = %w[retry schedule]

     class Enq
-      def enqueue_jobs(now = Time.now.to_f.to_s, sorted_sets = SETS)
+      include Sidekiq::Component
+
+      LUA_ZPOPBYSCORE = <<~LUA
+        local key, now = KEYS[1], ARGV[1]
+        local jobs = redis.call("zrange", key, "-inf", now, "byscore", "limit", 0, 1)
+        if jobs[1] then
+          redis.call("zrem", key, jobs[1])
+          return jobs[1]
+        end
+      LUA
+
+      def initialize(container)
+        @config = container
+        @client = Sidekiq::Client.new(config: container)
+        @done = false
+        @lua_zpopbyscore_sha = nil
+      end
+
+      def enqueue_jobs(sorted_sets = SETS)
         # A job's "score" in Redis is the time at which it should be processed.
         # Just check Redis for the set of jobs with a timestamp before now.
-        Sidekiq.redis do |conn|
+        redis do |conn|
           sorted_sets.each do |sorted_set|
-            # Get next items in the queue with scores (time to execute) <= now.
-            until (jobs = conn.zrangebyscore(sorted_set, "-inf", now, limit: [0, 100])).empty?
-              # We need to go through the list one at a time to reduce the risk of something
-              # going wrong between the time jobs are popped from the scheduled queue and when
-              # they are pushed onto a work queue and losing the jobs.
-              jobs.each do |job|
-                # Pop item off the queue and add it to the work queue. If the job can't be popped from
-                # the queue, it's because another process already popped it so we can move on to the
-                # next one.
-                if conn.zrem(sorted_set, job)
-                  Sidekiq::Client.push(Sidekiq.load_json(job))
-                  Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
-                end
-              end
+            # Get next item in the queue with score (time to execute) <= now.
+            # We need to go through the list one at a time to reduce the risk of something
+            # going wrong between the time jobs are popped from the scheduled queue and when
+            # they are pushed onto a work queue and losing the jobs.
+            while !@done && (job = zpopbyscore(conn, keys: [sorted_set], argv: [Time.now.to_f.to_s]))
+              @client.push(Sidekiq.load_json(job))
+              logger.debug { "enqueued #{sorted_set}: #{job}" }
             end
           end
         end
       end
+
+      def terminate
+        @done = true
+      end
+
+      private
+
+      def zpopbyscore(conn, keys: nil, argv: nil)
+        if @lua_zpopbyscore_sha.nil?
+          @lua_zpopbyscore_sha = conn.script(:load, LUA_ZPOPBYSCORE)
+        end
+
+        conn.call("EVALSHA", @lua_zpopbyscore_sha, keys.size, *keys, *argv)
+      rescue RedisClient::CommandError => e
+        raise unless e.message.start_with?("NOSCRIPT")
+
+        @lua_zpopbyscore_sha = nil
+        retry
+      end
     end

     ##
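
Note: the scheduler no longer fetches batches with zrangebyscore and races other processes on zrem; it pops one due job at a time, atomically, via a cached Lua script and reloads the script when Redis answers NOSCRIPT (for example after a restart or failover). A hedged, standalone sketch of the same pattern using the redis-client gem directly; the script mirrors the diff, the surrounding wiring is illustrative:

require "redis_client"   # Sidekiq 7 talks to Redis through redis-client

POP_DUE_JOB = <<~LUA
  local key, now = KEYS[1], ARGV[1]
  local jobs = redis.call("zrange", key, "-inf", now, "byscore", "limit", 0, 1)
  if jobs[1] then
    redis.call("zrem", key, jobs[1])
    return jobs[1]
  end
LUA

redis = RedisClient.new   # localhost:6379 by default; ZRANGE BYSCORE needs Redis 6.2+
sha = nil
begin
  sha ||= redis.call("SCRIPT", "LOAD", POP_DUE_JOB)   # cache the SHA, reuse it on every poll
  while (payload = redis.call("EVALSHA", sha, 1, "schedule", Time.now.to_f.to_s))
    puts "due: #{payload}"   # Sidekiq pushes this JSON back onto its work queue instead
  end
rescue RedisClient::CommandError => e
  raise unless e.message.start_with?("NOSCRIPT")
  sha = nil   # script cache was flushed; load it again and retry
  retry
end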

data/lib/sidekiq/scheduled.rb (continued)
@@ -40,12 +69,13 @@ module Sidekiq
     # just pops the job back onto its original queue so the
     # workers can pick it up like any other job.
     class Poller
-      include Util
+      include Sidekiq::Component

       INITIAL_WAIT = 10

-      def initialize
-        @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
+      def initialize(config)
+        @config = config
+        @enq = (config[:scheduled_enq] || Sidekiq::Scheduled::Enq).new(config)
         @sleeper = ConnectionPool::TimedStack.new
         @done = false
         @thread = nil
@@ -55,12 +85,10 @@ module Sidekiq
       # Shut down this instance, will pause until the thread is dead.
       def terminate
         @done = true
-        if @thread
-          t = @thread
-          @thread = nil
-          @sleeper << 0
-          t.value
-        end
+        @enq.terminate
+
+        @sleeper << 0
+        @thread&.value
       end

       def start
@@ -71,7 +99,7 @@
             enqueue
             wait
           end
-          Sidekiq.logger.info("Scheduler exiting...")
+          logger.info("Scheduler exiting...")
         }
       end

@@ -118,13 +146,16 @@
         # As we run more processes, the scheduling interval average will approach an even spread
         # between 0 and poll interval so we don't need this artifical boost.
         #
-        if process_count < 10
+        count = process_count
+        interval = poll_interval_average(count)
+
+        if count < 10
           # For small clusters, calculate a random interval that is ±50% the desired average.
-          poll_interval_average * rand + poll_interval_average.to_f / 2
+          interval * rand + interval.to_f / 2
         else
           # With 10+ processes, we should have enough randomness to get decent polling
           # across the entire timespan
-          poll_interval_average * rand
+          interval * rand
         end
       end

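
Note: to make the refactored random_poll_interval concrete, assume Sidekiq's default average_scheduled_poll_interval of 5 seconds and 3 known processes: scaled_poll_interval returns 15, and each process in that small cluster then sleeps a random 7.5 to 22.5 seconds between scheduler polls. A tiny worked sketch of the arithmetic (values are illustrative):

average_scheduled_poll_interval = 5                           # Sidekiq's default, in seconds
process_count = 3
interval = process_count * average_scheduled_poll_interval    # scaled_poll_interval => 15

# Fewer than 10 processes: interval * rand + interval / 2.0   => 7.5...22.5 seconds
# Ten or more processes:   interval * rand                    => 0...15 seconds
sleep_for = interval * rand + interval / 2.0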

data/lib/sidekiq/scheduled.rb (continued)
@@ -141,38 +172,64 @@
       # the same time: the thundering herd problem.
       #
       # We only do this if poll_interval_average is unset (the default).
-      def poll_interval_average
-        Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
+      def poll_interval_average(count)
+        @config[:poll_interval_average] || scaled_poll_interval(count)
       end

       # Calculates an average poll interval based on the number of known Sidekiq processes.
       # This minimizes a single point of failure by dispersing check-ins but without taxing
       # Redis if you run many Sidekiq processes.
-      def scaled_poll_interval
-        process_count * Sidekiq.options[:average_scheduled_poll_interval]
+      def scaled_poll_interval(process_count)
+        process_count * @config[:average_scheduled_poll_interval]
       end

       def process_count
-        # The work buried within Sidekiq::ProcessSet#cleanup can be
-        # expensive at scale. Cut it down by 90% with this counter.
-        # NB: This method is only called by the scheduler thread so we
-        # don't need to worry about the thread safety of +=.
-        pcount = Sidekiq::ProcessSet.new(@count_calls % 10 == 0).size
+        pcount = Sidekiq.redis { |conn| conn.scard("processes") }
         pcount = 1 if pcount == 0
-        @count_calls += 1
         pcount
       end

+      # A copy of Sidekiq::ProcessSet#cleanup because server
+      # should never depend on sidekiq/api.
+      def cleanup
+        # dont run cleanup more than once per minute
+        return 0 unless redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }
+
+        count = 0
+        redis do |conn|
+          procs = conn.sscan("processes").to_a
+          heartbeats = conn.pipelined { |pipeline|
+            procs.each do |key|
+              pipeline.hget(key, "info")
+            end
+          }
+
+          # the hash named key has an expiry of 60 seconds.
+          # if it's not found, that means the process has not reported
+          # in to Redis and probably died.
+          to_prune = procs.select.with_index { |proc, i|
+            heartbeats[i].nil?
+          }
+          count = conn.srem("processes", to_prune) unless to_prune.empty?
+        end
+        count
+      end
+
       def initial_wait
-        # Have all processes sleep between 5-15 seconds. 10 seconds
-        # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number
+        # Have all processes sleep between 5-15 seconds. 10 seconds to give time for
+        # the heartbeat to register (if the poll interval is going to be calculated by the number
        # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
         total = 0
-        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
+        total += INITIAL_WAIT unless @config[:poll_interval_average]
         total += (5 * rand)

         @sleeper.pop(total)
       rescue Timeout::Error
+      ensure
+        # periodically clean out the `processes` set in Redis which can collect
+        # references to dead processes over time. The process count affects how
+        # often we scan for scheduled jobs.
+        cleanup
       end
     end
   end
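
Note: the new cleanup uses SET with NX and EX as a cheap cluster-wide throttle: whichever process wins the SET scans the "processes" set that minute, everyone else skips it. A hedged restatement of that guard with the pipelining stripped out; the method name is invented for illustration, the Redis commands are the ones used above:

def prune_dead_processes
  # Only one caller per 60-second window wins the NX/EX SET, so the scan below
  # runs at most once a minute across the whole cluster.
  return 0 unless Sidekiq.redis { |conn| conn.set("process_cleanup", "1", nx: true, ex: 60) }

  Sidekiq.redis do |conn|
    procs = conn.sscan("processes").to_a
    # A live process keeps refreshing its heartbeat hash (60s TTL); a missing
    # "info" field means the hash expired and the process is presumed dead.
    dead = procs.select { |key| conn.hget(key, "info").nil? }
    dead.empty? ? 0 : conn.srem("processes", dead)
  end
end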

data/lib/sidekiq/testing/inline.rb
@@ -4,7 +4,7 @@ require "sidekiq/testing"

 ##
 # The Sidekiq inline infrastructure overrides perform_async so that it
-# actually calls perform instead. This allows workers to be run inline in a
+# actually calls perform instead. This allows jobs to be run inline in a
 # testing environment.
 #
 # This is similar to `Resque.inline = true` functionality.
@@ -15,8 +15,8 @@ require "sidekiq/testing"
 #
 #   $external_variable = 0
 #
-#   class ExternalWorker
-#     include Sidekiq::Worker
+#   class ExternalJob
+#     include Sidekiq::Job
 #
 #     def perform
 #       $external_variable = 1
@@ -24,7 +24,7 @@ require "sidekiq/testing"
 #   end
 #
 #   assert_equal 0, $external_variable
-#   ExternalWorker.perform_async
+#   ExternalJob.perform_async
 #   assert_equal 1, $external_variable
 #
 Sidekiq::Testing.inline!
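
Note: with the documentation now written in terms of Sidekiq::Job, here is a hedged example of inline mode in practice; the job class and assertions are illustrative. The block forms keep the mode change scoped to a single test instead of flipping it globally:

require "sidekiq/testing"   # loads fake mode by default

class WelcomeEmailJob
  include Sidekiq::Job

  def perform(user_id)
    # deliver the email...
  end
end

# Fake mode only records the job in an in-memory queue:
Sidekiq::Testing.fake! do
  WelcomeEmailJob.perform_async(42)
  WelcomeEmailJob.jobs.size    # => 1
end

# Inline mode executes perform immediately, in-process:
Sidekiq::Testing.inline! do
  WelcomeEmailJob.perform_async(42)   # perform(42) has already run when this returns
end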

data/lib/sidekiq/testing.rb
@@ -51,19 +51,10 @@ module Sidekiq
      end

      def server_middleware
-        @server_chain ||= Middleware::Chain.new
+        @server_chain ||= Middleware::Chain.new(Sidekiq.default_configuration)
        yield @server_chain if block_given?
        @server_chain
      end
-
-      def constantize(str)
-        names = str.split("::")
-        names.shift if names.empty? || names.first.empty?
-
-        names.inject(Object) do |constant, name|
-          constant.const_defined?(name) ? constant.const_get(name) : constant.const_missing(name)
-        end
-      end
    end
  end

@@ -83,7 +74,7 @@ module Sidekiq
        true
      elsif Sidekiq::Testing.inline?
        payloads.each do |job|
-          klass = Sidekiq::Testing.constantize(job["class"])
+          klass = Object.const_get(job["class"])
          job["id"] ||= SecureRandom.hex(12)
          job_hash = Sidekiq.load_json(Sidekiq.dump_json(job))
          klass.process_job(job_hash)
@@ -101,20 +92,20 @@ module Sidekiq
  ##
  # The Queues class is only for testing the fake queue implementation.
  # There are 2 data structures involved in tandem. This is due to the
-  # Rspec syntax of change(QueueWorker.jobs, :size). It keeps a reference
+  # Rspec syntax of change(HardJob.jobs, :size). It keeps a reference
  # to the array. Because the array was dervied from a filter of the total
  # jobs enqueued, it appeared as though the array didn't change.
  #
  # To solve this, we'll keep 2 hashes containing the jobs. One with keys based
-  # on the queue, and another with keys of the worker names, so the array for
-  # QueueWorker.jobs is a straight reference to a real array.
+  # on the queue, and another with keys of the job type, so the array for
+  # HardJob.jobs is a straight reference to a real array.
  #
  # Queue-based hash:
  #
  #   {
  #     "default"=>[
  #       {
-  #         "class"=>"TestTesting::QueueWorker",
+  #         "class"=>"TestTesting::HardJob",
  #         "args"=>[1, 2],
  #         "retry"=>true,
  #         "queue"=>"default",
@@ -124,12 +115,12 @@ module Sidekiq
  #     ]
  #   }
  #
-  # Worker-based hash:
+  # Job-based hash:
  #
  #   {
-  #     "TestTesting::QueueWorker"=>[
+  #     "TestTesting::HardJob"=>[
  #       {
-  #         "class"=>"TestTesting::QueueWorker",
+  #         "class"=>"TestTesting::HardJob",
  #         "args"=>[1, 2],
  #         "retry"=>true,
  #         "queue"=>"default",
@@ -144,14 +135,14 @@ module Sidekiq
  #   require 'sidekiq/testing'
  #
  #   assert_equal 0, Sidekiq::Queues["default"].size
-  #   HardWorker.perform_async(:something)
+  #   HardJob.perform_async(:something)
  #   assert_equal 1, Sidekiq::Queues["default"].size
  #   assert_equal :something, Sidekiq::Queues["default"].first['args'][0]
  #
-  # You can also clear all workers' jobs:
+  # You can also clear all jobs:
  #
  #   assert_equal 0, Sidekiq::Queues["default"].size
-  #   HardWorker.perform_async(:something)
+  #   HardJob.perform_async(:something)
  #   Sidekiq::Queues.clear_all
  #   assert_equal 0, Sidekiq::Queues["default"].size
  #
@@ -170,35 +161,36 @@ module Sidekiq

      def push(queue, klass, job)
        jobs_by_queue[queue] << job
-        jobs_by_worker[klass] << job
+        jobs_by_class[klass] << job
      end

      def jobs_by_queue
        @jobs_by_queue ||= Hash.new { |hash, key| hash[key] = [] }
      end

-      def jobs_by_worker
-        @jobs_by_worker ||= Hash.new { |hash, key| hash[key] = [] }
+      def jobs_by_class
+        @jobs_by_class ||= Hash.new { |hash, key| hash[key] = [] }
      end
+      alias_method :jobs_by_worker, :jobs_by_class

      def delete_for(jid, queue, klass)
        jobs_by_queue[queue.to_s].delete_if { |job| job["jid"] == jid }
-        jobs_by_worker[klass].delete_if { |job| job["jid"] == jid }
+        jobs_by_class[klass].delete_if { |job| job["jid"] == jid }
      end

      def clear_for(queue, klass)
-        jobs_by_queue[queue].clear
-        jobs_by_worker[klass].clear
+        jobs_by_queue[queue.to_s].clear
+        jobs_by_class[klass].clear
      end

      def clear_all
        jobs_by_queue.clear
-        jobs_by_worker.clear
+        jobs_by_class.clear
      end
    end
  end

-  module Worker
+  module Job
    ##
    # The Sidekiq testing infrastructure overrides perform_async
    # so that it does not actually touch the network. Instead it
@@ -212,43 +204,27 @@ module Sidekiq
    #
    #   require 'sidekiq/testing'
    #
-    #   assert_equal 0, HardWorker.jobs.size
-    #   HardWorker.perform_async(:something)
-    #   assert_equal 1, HardWorker.jobs.size
-    #   assert_equal :something, HardWorker.jobs[0]['args'][0]
-    #
-    #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
-    #   MyMailer.delay.send_welcome_email('foo@example.com')
-    #   assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
+    #   assert_equal 0, HardJob.jobs.size
+    #   HardJob.perform_async(:something)
+    #   assert_equal 1, HardJob.jobs.size
+    #   assert_equal :something, HardJob.jobs[0]['args'][0]
    #
-    # You can also clear and drain all workers' jobs:
+    # You can also clear and drain all job types:
    #
-    #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
-    #   assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size
-    #
-    #   MyMailer.delay.send_welcome_email('foo@example.com')
-    #   MyModel.delay.do_something_hard
-    #
-    #   assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
-    #   assert_equal 1, Sidekiq::Extensions::DelayedModel.jobs.size
-    #
-    #   Sidekiq::Worker.clear_all # or .drain_all
-    #
-    #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
-    #   assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size
+    #   Sidekiq::Job.clear_all # or .drain_all
    #
    # This can be useful to make sure jobs don't linger between tests:
    #
    #   RSpec.configure do |config|
    #     config.before(:each) do
-    #       Sidekiq::Worker.clear_all
+    #       Sidekiq::Job.clear_all
    #     end
    #   end
    #
    # or for acceptance testing, i.e. with cucumber:
    #
    #   AfterStep do
-    #     Sidekiq::Worker.drain_all
+    #     Sidekiq::Job.drain_all
    #   end
    #
    #   When I sign up as "foo@example.com"
@@ -262,7 +238,7 @@ module Sidekiq

      # Jobs queued for this worker
      def jobs
-        Queues.jobs_by_worker[to_s]
+        Queues.jobs_by_class[to_s]
      end

      # Clear all jobs for this worker
@@ -288,11 +264,11 @@ module Sidekiq
      end

      def process_job(job)
-        worker = new
-        worker.jid = job["jid"]
-        worker.bid = job["bid"] if worker.respond_to?(:bid=)
-        Sidekiq::Testing.server_middleware.invoke(worker, job, job["queue"]) do
-          execute_job(worker, job["args"])
+        inst = new
+        inst.jid = job["jid"]
+        inst.bid = job["bid"] if inst.respond_to?(:bid=)
+        Sidekiq::Testing.server_middleware.invoke(inst, job, job["queue"]) do
+          execute_job(inst, job["args"])
        end
      end

@@ -306,18 +282,18 @@ module Sidekiq
        Queues.jobs_by_queue.values.flatten
      end

-      # Clear all queued jobs across all workers
+      # Clear all queued jobs
      def clear_all
        Queues.clear_all
      end

-      # Drain all queued jobs across all workers
+      # Drain (execute) all queued jobs
      def drain_all
        while jobs.any?
-          worker_classes = jobs.map { |job| job["class"] }.uniq
+          job_classes = jobs.map { |job| job["class"] }.uniq

-          worker_classes.each do |worker_class|
-            Sidekiq::Testing.constantize(worker_class).drain
+          job_classes.each do |job_class|
+            Object.const_get(job_class).drain
          end
        end
      end
@@ -328,13 +304,10 @@ module Sidekiq
    def jobs_for(klass)
      jobs.select do |job|
        marshalled = job["args"][0]
-        marshalled.index(klass.to_s) && YAML.load(marshalled)[0] == klass
+        marshalled.index(klass.to_s) && YAML.safe_load(marshalled)[0] == klass
      end
    end
  end
-
-  Sidekiq::Extensions::DelayedMailer.extend(TestingExtensions) if defined?(Sidekiq::Extensions::DelayedMailer)
-  Sidekiq::Extensions::DelayedModel.extend(TestingExtensions) if defined?(Sidekiq::Extensions::DelayedModel)
 end

 if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test? && !$TESTING
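
Note: pulling the testing changes together, a hedged RSpec-flavored sketch of the fake queue API under its new names (Sidekiq::Job, jobs_by_class, clear_all, drain); HardJob and the specs themselves are illustrative:

require "sidekiq/testing"

class HardJob
  include Sidekiq::Job
  def perform(*); end
end

RSpec.describe HardJob do
  before { Sidekiq::Job.clear_all }    # formerly Sidekiq::Worker.clear_all

  it "enqueues into the fake queue" do
    expect { HardJob.perform_async(1, 2) }
      .to change(HardJob.jobs, :size).by(1)     # jobs is Queues.jobs_by_class[to_s]
    expect(Sidekiq::Queues["default"].size).to eq(1)
  end

  it "drains everything that was enqueued" do
    HardJob.perform_async(1, 2)
    HardJob.drain                       # runs each job through the testing server middleware
    expect(HardJob.jobs).to be_empty
  end
end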