sidekiq 5.0.0
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of sidekiq might be problematic. Click here for more details.
- checksums.yaml +7 -0
- data/.github/contributing.md +32 -0
- data/.github/issue_template.md +9 -0
- data/.gitignore +13 -0
- data/.travis.yml +18 -0
- data/3.0-Upgrade.md +70 -0
- data/4.0-Upgrade.md +53 -0
- data/5.0-Upgrade.md +56 -0
- data/COMM-LICENSE +95 -0
- data/Changes.md +1402 -0
- data/Ent-Changes.md +174 -0
- data/Gemfile +29 -0
- data/LICENSE +9 -0
- data/Pro-2.0-Upgrade.md +138 -0
- data/Pro-3.0-Upgrade.md +44 -0
- data/Pro-Changes.md +632 -0
- data/README.md +107 -0
- data/Rakefile +12 -0
- data/bin/sidekiq +18 -0
- data/bin/sidekiqctl +99 -0
- data/bin/sidekiqload +149 -0
- data/code_of_conduct.md +50 -0
- data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
- data/lib/generators/sidekiq/worker_generator.rb +49 -0
- data/lib/sidekiq.rb +228 -0
- data/lib/sidekiq/api.rb +871 -0
- data/lib/sidekiq/cli.rb +413 -0
- data/lib/sidekiq/client.rb +238 -0
- data/lib/sidekiq/core_ext.rb +119 -0
- data/lib/sidekiq/delay.rb +21 -0
- data/lib/sidekiq/exception_handler.rb +31 -0
- data/lib/sidekiq/extensions/action_mailer.rb +57 -0
- data/lib/sidekiq/extensions/active_record.rb +40 -0
- data/lib/sidekiq/extensions/class_methods.rb +40 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
- data/lib/sidekiq/fetch.rb +81 -0
- data/lib/sidekiq/job_logger.rb +27 -0
- data/lib/sidekiq/job_retry.rb +235 -0
- data/lib/sidekiq/launcher.rb +167 -0
- data/lib/sidekiq/logging.rb +106 -0
- data/lib/sidekiq/manager.rb +138 -0
- data/lib/sidekiq/middleware/chain.rb +150 -0
- data/lib/sidekiq/middleware/i18n.rb +42 -0
- data/lib/sidekiq/middleware/server/active_record.rb +22 -0
- data/lib/sidekiq/paginator.rb +43 -0
- data/lib/sidekiq/processor.rb +238 -0
- data/lib/sidekiq/rails.rb +60 -0
- data/lib/sidekiq/redis_connection.rb +106 -0
- data/lib/sidekiq/scheduled.rb +147 -0
- data/lib/sidekiq/testing.rb +324 -0
- data/lib/sidekiq/testing/inline.rb +29 -0
- data/lib/sidekiq/util.rb +63 -0
- data/lib/sidekiq/version.rb +4 -0
- data/lib/sidekiq/web.rb +213 -0
- data/lib/sidekiq/web/action.rb +89 -0
- data/lib/sidekiq/web/application.rb +331 -0
- data/lib/sidekiq/web/helpers.rb +286 -0
- data/lib/sidekiq/web/router.rb +100 -0
- data/lib/sidekiq/worker.rb +144 -0
- data/sidekiq.gemspec +32 -0
- data/web/assets/images/favicon.ico +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status.png +0 -0
- data/web/assets/javascripts/application.js +92 -0
- data/web/assets/javascripts/dashboard.js +298 -0
- data/web/assets/stylesheets/application-rtl.css +246 -0
- data/web/assets/stylesheets/application.css +1111 -0
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +5 -0
- data/web/locales/ar.yml +80 -0
- data/web/locales/cs.yml +78 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +69 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +79 -0
- data/web/locales/es.yml +69 -0
- data/web/locales/fa.yml +80 -0
- data/web/locales/fr.yml +78 -0
- data/web/locales/he.yml +79 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +78 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/nb.yml +77 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +78 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/uk.yml +76 -0
- data/web/locales/ur.yml +80 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +17 -0
- data/web/views/_job_info.erb +88 -0
- data/web/views/_nav.erb +66 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +94 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +40 -0
- data/web/views/morgue.erb +75 -0
- data/web/views/queue.erb +45 -0
- data/web/views/queues.erb +28 -0
- data/web/views/retries.erb +76 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +54 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +366 -0
@@ -0,0 +1,60 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
module Sidekiq
  class Rails < ::Rails::Engine
    # Wire up Sidekiq before the application's own configuration runs, since
    # that configuration may want to adjust Sidekiq middleware.
    #
    # This hook fires right after `Rails::Application` is subclassed within
    # config/application.rb and before any config is touched -- well ahead of
    # config/environments/*.rb and config/initializers/*.rb.
    config.before_configuration do
      if ::Rails::VERSION::MAJOR < 5 && defined?(::ActiveRecord)
        Sidekiq.server_middleware do |chain|
          require 'sidekiq/middleware/server/active_record'
          chain.add Sidekiq::Middleware::Server::ActiveRecord
        end
      end
    end

    # Runs after all initializers, just before config/environment.rb returns
    # control to sidekiq/cli.rb. The reloader has to be installed this late
    # so we can see whether cache_classes has been turned on.
    #
    # None of this matters on the client side; it only affects the Sidekiq
    # server process itself.
    config.after_initialize do
      Sidekiq.configure_server do |_|
        if ::Rails::VERSION::MAJOR >= 5
          Sidekiq.options[:reloader] = Sidekiq::Rails::Reloader.new
          Psych::Visitors::ToRuby.prepend(Sidekiq::Rails::PsychAutoload)
        end
      end
    end

    # Wraps job execution with the Rails application reloader so app code is
    # reloaded between jobs (e.g. in development mode).
    class Reloader
      def initialize(app = ::Rails.application)
        @app = app
      end

      def call
        @app.reloader.wrap { yield }
      end

      def inspect
        "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
      end
    end

    # Lets Psych constantize class names embedded in YAML payloads via
    # Rails' `constantize` (which can autoload), falling back to Psych's
    # default resolution when the constant is unknown.
    module PsychAutoload
      def resolve_class(klass_name)
        # nil/false pass straight through, matching `klass_name && ...`.
        return klass_name unless klass_name
        klass_name.constantize
      rescue NameError
        super
      end
    end
  end if defined?(::Rails)
end
|
@@ -0,0 +1,106 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
require 'connection_pool'
|
3
|
+
require 'redis'
|
4
|
+
require 'uri'
|
5
|
+
|
6
|
+
module Sidekiq
  class RedisConnection
    class << self

      # Build a ConnectionPool of Redis clients sized for this process.
      #
      # options - Hash (string or symbol keys accepted):
      #   :url             - Redis URL; defaults to the REDIS_PROVIDER/REDIS_URL env vars
      #   :size            - pool size; defaults to concurrency + 5 on the server, 5 for clients
      #   :pool_timeout    - seconds to wait for a free connection (default 1)
      #   :namespace       - optional redis-namespace prefix (requires the redis-namespace gem)
      #   :network_timeout - network timeout, forwarded to redis-rb as :timeout
      def create(options={})
        options = options.symbolize_keys

        options[:url] ||= determine_redis_provider

        size = options[:size] || (Sidekiq.server? ? (Sidekiq.options[:concurrency] + 5) : 5)

        verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?

        pool_timeout = options[:pool_timeout] || 1
        log_info(options)

        ConnectionPool.new(:timeout => pool_timeout, :size => size) do
          build_client(options)
        end
      end

      private

      # Sidekiq needs a lot of concurrent Redis connections.
      #
      # We need a connection for each Processor.
      # We need a connection for Pro's real-time change listener.
      # We need a connection for various features which call Redis every few seconds:
      #   - the process heartbeat
      #   - enterprise's leader election
      #   - enterprise's cron support
      def verify_sizing(size, concurrency)
        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but really needs to have at least #{concurrency + 2}" if size <= concurrency
      end

      # Construct a single Redis client, wrapped in a Redis::Namespace when
      # :namespace is configured. Exits the process if the namespace gem is
      # missing, since silently dropping the namespace would corrupt data.
      def build_client(options)
        namespace = options[:namespace]

        client = Redis.new client_opts(options)
        if namespace
          begin
            require 'redis/namespace'
            Redis::Namespace.new(namespace, :redis => client)
          rescue LoadError
            # FIX: the two message halves were previously joined without a
            # separating space, logging "...Gemfile.Add the gem...".
            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile. " \
                                 "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
            exit(-127)
          end
        else
          client
        end
      end

      # Translate Sidekiq-level options into the option hash redis-rb
      # understands. Works on a copy; never mutates the caller's hash.
      def client_opts(options)
        opts = options.dup
        if opts[:namespace]
          opts.delete(:namespace)
        end

        if opts[:network_timeout]
          opts[:timeout] = opts[:network_timeout]
          opts.delete(:network_timeout)
        end

        opts[:driver] ||= 'ruby'.freeze

        # Issue #3303, redis-rb will silently retry an operation.
        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
        # is performed twice but I believe this is much, much rarer
        # than the reconnect silently fixing a problem; we keep it
        # on by default.
        opts[:reconnect_attempts] ||= 1

        opts
      end

      # Log the effective connection options at boot with any password
      # (inline in the URL or under :password) redacted.
      def log_info(options)
        # Don't log Redis AUTH password
        redacted = "REDACTED"
        scrubbed_options = options.dup
        if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
          uri.password = redacted
          scrubbed_options[:url] = uri.to_s
        end
        if scrubbed_options[:password]
          scrubbed_options[:password] = redacted
        end
        if Sidekiq.server?
          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with redis options #{scrubbed_options}")
        else
          Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}")
        end
      end

      # REDIS_PROVIDER may name *another* env var holding the URL (Heroku
      # add-on convention); otherwise read REDIS_URL directly.
      def determine_redis_provider
        ENV[ENV['REDIS_PROVIDER'] || 'REDIS_URL']
      end

    end
  end
end
|
@@ -0,0 +1,147 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
require 'sidekiq'
|
3
|
+
require 'sidekiq/util'
|
4
|
+
require 'sidekiq/api'
|
5
|
+
|
6
|
+
module Sidekiq
  module Scheduled
    SETS = %w(retry schedule)

    class Enq
      # Move every job whose score (its scheduled run time) is <= `now` from
      # the given sorted sets onto its destination work queue.
      def enqueue_jobs(now=Time.now.to_f.to_s, sorted_sets=SETS)
        # A job's Redis "score" is the time at which it should be processed,
        # so anything scored at or before `now` is due.
        Sidekiq.redis do |conn|
          sorted_sets.each do |sorted_set|
            # Pull due jobs one at a time. Working one-by-one narrows the
            # window in which a job could be lost between leaving the
            # scheduled set and landing on a work queue.
            loop do
              job = conn.zrangebyscore(sorted_set, '-inf'.freeze, now, :limit => [0, 1]).first
              break unless job

              # ZREM is the claim: if it returns false another process
              # already popped this job, so just move on to the next one.
              if conn.zrem(sorted_set, job)
                Sidekiq::Client.push(Sidekiq.load_json(job))
                Sidekiq::Logging.logger.debug { "enqueued #{sorted_set}: #{job}" }
              end
            end
          end
        end
      end
    end

    ##
    # Periodically checks Redis for jobs in the retry or scheduled sets whose
    # timestamp has passed and pushes them back onto their original queues so
    # workers pick them up like any other job.
    class Poller
      include Util

      INITIAL_WAIT = 10

      def initialize
        @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
        @sleeper = ConnectionPool::TimedStack.new
        @done = false
        @thread = nil
      end

      # Shut down this instance; blocks until the poller thread has exited.
      def terminate
        @done = true
        return unless @thread

        poller_thread = @thread
        @thread = nil
        @sleeper << 0
        poller_thread.value
      end

      def start
        @thread ||= safe_thread("scheduler") do
          initial_wait

          until @done
            enqueue
            wait
          end
          Sidekiq.logger.info("Scheduler exiting...")
        end
      end

      def enqueue
        @enq.enqueue_jobs
      rescue => ex
        # Most likely Redis networking trouble. Punt and try again at the
        # next interval.
        logger.error ex.message
        ex.backtrace.each { |line| logger.error(line) }
      end

      private

      def wait
        @sleeper.pop(random_poll_interval)
      rescue Timeout::Error
        # expected
      rescue => ex
        # If poll_interval_average hasn't been calculated yet, reaching
        # Redis here can itself raise; back off briefly.
        logger.error ex.message
        logger.error ex.backtrace.first
        sleep 5
      end

      # A random interval within +/-50% of the desired average.
      def random_poll_interval
        poll_interval_average * rand + poll_interval_average.to_f / 2
      end

      # The poll interval is tuned to the size of the active Sidekiq cluster:
      # 30 processes polling every 15 seconds would hit Redis twice a second,
      # which is far too often. Instead each process aims for roughly
      # (process count * 15) seconds between polls, randomized so that
      # simultaneously restarted processes don't all poll at once (the
      # thundering herd problem). Only applies when :poll_interval_average
      # is unset (the default).
      def poll_interval_average
        Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
      end

      # Average poll interval scaled by the number of known Sidekiq
      # processes: disperses check-ins without taxing Redis when many
      # processes are running.
      def scaled_poll_interval
        process_count = Sidekiq::ProcessSet.new.size
        process_count = 1 if process_count == 0
        process_count * Sidekiq.options[:average_scheduled_poll_interval]
      end

      def initial_wait
        # Sleep 5-15 seconds total: 10 seconds so the heartbeat can register
        # (when the poll interval is derived from the worker count), plus up
        # to 5 random seconds so processes don't all hit Redis at once.
        total = 0
        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
        total += (5 * rand)

        @sleeper.pop(total)
      rescue Timeout::Error
      end

    end
  end
end
|
@@ -0,0 +1,324 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
require 'securerandom'
|
3
|
+
require 'sidekiq'
|
4
|
+
|
5
|
+
module Sidekiq
|
6
|
+
|
7
|
+
class Testing
  class << self
    # Current test mode: :fake, :inline, :disable, or nil (never set).
    attr_accessor :__test_mode

    # Set the test mode. Given a block, the mode applies only for the
    # duration of the block and the previous mode is restored afterwards;
    # without a block the change is permanent.
    def __set_test_mode(mode)
      if block_given?
        previous = self.__test_mode
        begin
          self.__test_mode = mode
          yield
        ensure
          self.__test_mode = previous
        end
      else
        self.__test_mode = mode
      end
    end

    def disable!(&block)
      __set_test_mode(:disable, &block)
    end

    def fake!(&block)
      __set_test_mode(:fake, &block)
    end

    def inline!(&block)
      __set_test_mode(:inline, &block)
    end

    def enabled?
      __test_mode != :disable
    end

    def disabled?
      __test_mode == :disable
    end

    def fake?
      __test_mode == :fake
    end

    def inline?
      __test_mode == :inline
    end

    # The middleware chain run around jobs executed under the testing API.
    # Yields the chain when given a block so callers can configure it.
    def server_middleware
      @server_chain ||= Middleware::Chain.new
      yield @server_chain if block_given?
      @server_chain
    end
  end
end
|
60
|
+
|
61
|
+
# Default to fake testing to keep old behavior
Sidekiq::Testing.fake!

# Raised by ClassMethods#perform_one when a worker's fake queue is empty.
class EmptyQueueError < RuntimeError; end
|
65
|
+
|
66
|
+
class Client
  alias_method :raw_push_real, :raw_push

  # Testing override of raw_push:
  # - :fake mode appends jobs to the in-memory Queues store,
  # - :inline mode executes jobs immediately,
  # - otherwise jobs go to Redis via the real implementation.
  def raw_push(payloads)
    if Sidekiq::Testing.fake?
      payloads.each do |payload|
        # Round-trip through JSON so the stored job looks exactly like one
        # read back from Redis.
        normalized = Sidekiq.load_json(Sidekiq.dump_json(payload))
        normalized.merge!('enqueued_at' => Time.now.to_f) unless normalized['at']
        Queues.push(normalized['queue'], normalized['class'], normalized)
      end
      true
    elsif Sidekiq::Testing.inline?
      payloads.each do |payload|
        worker_class = payload['class'].constantize
        payload['id'] ||= SecureRandom.hex(12)
        job_hash = Sidekiq.load_json(Sidekiq.dump_json(payload))
        worker_class.process_job(job_hash)
      end
      true
    else
      raw_push_real(payloads)
    end
  end
end
|
90
|
+
|
91
|
+
module Queues
  ##
  # Test-only, in-memory job storage backing the fake queue implementation.
  #
  # Two hashes are maintained in tandem because of RSpec's
  # change(QueueWorker.jobs, :size) syntax, which holds a reference to the
  # array it watches. If worker job lists were derived by filtering one
  # global list, that held reference would never appear to change. So every
  # job is recorded twice -- once keyed by queue name, once keyed by worker
  # class name -- making QueueWorker.jobs a direct reference to a real array.
  #
  # Example:
  #
  #   require 'sidekiq/testing'
  #
  #   assert_equal 0, Sidekiq::Queues["default"].size
  #   HardWorker.perform_async(:something)
  #   assert_equal 1, Sidekiq::Queues["default"].size
  #   assert_equal :something, Sidekiq::Queues["default"].first['args'][0]
  #
  # Clear all jobs between tests to avoid leakage:
  #
  #   RSpec.configure do |config|
  #     config.before(:each) do
  #       Sidekiq::Queues.clear_all
  #     end
  #   end
  #
  class << self
    # Jobs currently stored for the named queue.
    def [](queue)
      jobs_by_queue[queue]
    end

    # Record a job under both its queue name and its worker class name.
    def push(queue, klass, job)
      jobs_by_queue[queue] << job
      jobs_by_worker[klass] << job
    end

    # Hash of queue name => Array of jobs; keys auto-vivify.
    def jobs_by_queue
      @jobs_by_queue ||= Hash.new { |store, name| store[name] = [] }
    end

    # Hash of worker class name => Array of jobs; keys auto-vivify.
    def jobs_by_worker
      @jobs_by_worker ||= Hash.new { |store, name| store[name] = [] }
    end

    # Remove the job with the given jid from both indexes.
    def delete_for(jid, queue, klass)
      jobs_by_queue[queue.to_s].delete_if { |entry| entry["jid"] == jid }
      jobs_by_worker[klass].delete_if { |entry| entry["jid"] == jid }
    end

    # Empty one queue's array and one worker's array (in place, so held
    # references observe the change).
    def clear_for(queue, klass)
      jobs_by_queue[queue].clear
      jobs_by_worker[klass].clear
    end

    # Drop every stored job across all queues and workers.
    def clear_all
      jobs_by_queue.clear
      jobs_by_worker.clear
    end
  end
end
|
191
|
+
|
192
|
+
module Worker
  ##
  # The Sidekiq testing infrastructure overrides perform_async so jobs
  # never touch the network. Instead they accumulate in a per-class array
  # so their presence/absence can be asserted by your tests -- similar to
  # ActionMailer's :test delivery_method and its
  # ActionMailer::Base.deliveries array.
  #
  # Example:
  #
  #   require 'sidekiq/testing'
  #
  #   assert_equal 0, HardWorker.jobs.size
  #   HardWorker.perform_async(:something)
  #   assert_equal 1, HardWorker.jobs.size
  #   assert_equal :something, HardWorker.jobs[0]['args'][0]
  #
  #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
  #   MyMailer.delay.send_welcome_email('foo@example.com')
  #   assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
  #
  # You can also clear and drain all workers' jobs, which is useful to
  # make sure jobs don't linger between tests:
  #
  #   RSpec.configure do |config|
  #     config.before(:each) do
  #       Sidekiq::Worker.clear_all    # or .drain_all
  #     end
  #   end
  #
  # or for acceptance testing, i.e. with cucumber:
  #
  #   AfterStep do
  #     Sidekiq::Worker.drain_all
  #   end
  #
  #   When I sign up as "foo@example.com"
  #   Then I should receive a welcome email to "foo@example.com"
  #
  module ClassMethods

    # Queue this worker pushes to.
    def queue
      self.sidekiq_options["queue"]
    end

    # Jobs currently queued for this worker class.
    def jobs
      Queues.jobs_by_worker[self.to_s]
    end

    # Remove all queued jobs for this worker class.
    def clear
      Queues.clear_for(queue, self.to_s)
    end

    # Execute (and remove) every queued job for this worker class.
    def drain
      until jobs.empty?
        head = jobs.first
        Queues.delete_for(head["jid"], head["queue"], self.to_s)
        process_job(head)
      end
    end

    # Pop a single job and perform it; raises EmptyQueueError when there
    # is nothing queued.
    def perform_one
      raise(EmptyQueueError, "perform_one called with empty job queue") if jobs.empty?
      head = jobs.first
      Queues.delete_for(head["jid"], queue, self.to_s)
      process_job(head)
    end

    # Instantiate the worker, wire up jid/bid, and run it through the
    # testing middleware chain.
    def process_job(job)
      worker = new
      worker.jid = job['jid']
      worker.bid = job['bid'] if worker.respond_to?(:bid=)
      Sidekiq::Testing.server_middleware.invoke(worker, job, job['queue']) do
        execute_job(worker, job['args'])
      end
    end

    def execute_job(worker, args)
      worker.perform(*args)
    end
  end

  class << self
    def jobs # :nodoc:
      Queues.jobs_by_queue.values.flatten
    end

    # Clear all queued jobs across all workers.
    def clear_all
      Queues.clear_all
    end

    # Drain all queued jobs across all workers. Loops because draining one
    # worker may enqueue jobs for another.
    def drain_all
      until jobs.empty?
        active_workers = jobs.map { |job| job["class"] }.uniq

        active_workers.each do |worker_class|
          worker_class.constantize.drain
        end
      end
    end
  end
end
|
318
|
+
end
|
319
|
+
|
320
|
+
# Warn loudly when the testing API is loaded outside the test environment:
# jobs will be intercepted in memory instead of going to Redis.
if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test?
  banner = "**************************************************"
  puts(banner)
  puts("⛔️ WARNING: Sidekiq testing API enabled, but this is not the test environment.  Your jobs will not go to Redis.")
  puts(banner)
end
|