sidekiq 6.0.4
Sign up to get free protection for your applications and to get access to all the features.
Potentially problematic release.
This version of sidekiq might be problematic. Click here for more details.
- checksums.yaml +7 -0
- data/.circleci/config.yml +82 -0
- data/.github/contributing.md +32 -0
- data/.github/issue_template.md +11 -0
- data/.gitignore +13 -0
- data/.standard.yml +20 -0
- data/3.0-Upgrade.md +70 -0
- data/4.0-Upgrade.md +53 -0
- data/5.0-Upgrade.md +56 -0
- data/6.0-Upgrade.md +72 -0
- data/COMM-LICENSE +97 -0
- data/Changes.md +1666 -0
- data/Ent-2.0-Upgrade.md +37 -0
- data/Ent-Changes.md +256 -0
- data/Gemfile +24 -0
- data/Gemfile.lock +199 -0
- data/LICENSE +9 -0
- data/Pro-2.0-Upgrade.md +138 -0
- data/Pro-3.0-Upgrade.md +44 -0
- data/Pro-4.0-Upgrade.md +35 -0
- data/Pro-5.0-Upgrade.md +25 -0
- data/Pro-Changes.md +776 -0
- data/README.md +97 -0
- data/Rakefile +10 -0
- data/bin/sidekiq +18 -0
- data/bin/sidekiqload +157 -0
- data/bin/sidekiqmon +8 -0
- data/code_of_conduct.md +50 -0
- data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
- data/lib/generators/sidekiq/worker_generator.rb +57 -0
- data/lib/sidekiq.rb +260 -0
- data/lib/sidekiq/api.rb +960 -0
- data/lib/sidekiq/cli.rb +387 -0
- data/lib/sidekiq/client.rb +256 -0
- data/lib/sidekiq/delay.rb +41 -0
- data/lib/sidekiq/exception_handler.rb +27 -0
- data/lib/sidekiq/extensions/action_mailer.rb +47 -0
- data/lib/sidekiq/extensions/active_record.rb +42 -0
- data/lib/sidekiq/extensions/class_methods.rb +42 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
- data/lib/sidekiq/fetch.rb +80 -0
- data/lib/sidekiq/job_logger.rb +63 -0
- data/lib/sidekiq/job_retry.rb +262 -0
- data/lib/sidekiq/launcher.rb +179 -0
- data/lib/sidekiq/logger.rb +165 -0
- data/lib/sidekiq/manager.rb +135 -0
- data/lib/sidekiq/middleware/chain.rb +160 -0
- data/lib/sidekiq/middleware/i18n.rb +40 -0
- data/lib/sidekiq/monitor.rb +133 -0
- data/lib/sidekiq/paginator.rb +47 -0
- data/lib/sidekiq/processor.rb +280 -0
- data/lib/sidekiq/rails.rb +52 -0
- data/lib/sidekiq/redis_connection.rb +141 -0
- data/lib/sidekiq/scheduled.rb +173 -0
- data/lib/sidekiq/testing.rb +344 -0
- data/lib/sidekiq/testing/inline.rb +30 -0
- data/lib/sidekiq/util.rb +67 -0
- data/lib/sidekiq/version.rb +5 -0
- data/lib/sidekiq/web.rb +205 -0
- data/lib/sidekiq/web/action.rb +93 -0
- data/lib/sidekiq/web/application.rb +359 -0
- data/lib/sidekiq/web/helpers.rb +336 -0
- data/lib/sidekiq/web/router.rb +103 -0
- data/lib/sidekiq/worker.rb +247 -0
- data/sidekiq.gemspec +21 -0
- data/web/assets/images/favicon.ico +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status.png +0 -0
- data/web/assets/javascripts/application.js +92 -0
- data/web/assets/javascripts/dashboard.js +296 -0
- data/web/assets/stylesheets/application-dark.css +125 -0
- data/web/assets/stylesheets/application-rtl.css +246 -0
- data/web/assets/stylesheets/application.css +1153 -0
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +5 -0
- data/web/locales/ar.yml +81 -0
- data/web/locales/cs.yml +78 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +81 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +83 -0
- data/web/locales/es.yml +70 -0
- data/web/locales/fa.yml +80 -0
- data/web/locales/fr.yml +78 -0
- data/web/locales/he.yml +79 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +81 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/nb.yml +77 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +78 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/uk.yml +76 -0
- data/web/locales/ur.yml +80 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +20 -0
- data/web/views/_job_info.erb +89 -0
- data/web/views/_nav.erb +52 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +101 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +41 -0
- data/web/views/morgue.erb +78 -0
- data/web/views/queue.erb +55 -0
- data/web/views/queues.erb +38 -0
- data/web/views/retries.erb +83 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +57 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +221 -0
@@ -0,0 +1,52 @@
|
|
1
|
+
# frozen_string_literal: true

require "sidekiq/worker"

module Sidekiq
  class Rails < ::Rails::Engine
    # Mixing in the Options module lets ActiveJob classes drive Sidekiq
    # features directly via the *sidekiq_options* class method — for example,
    # opting out of AJ's own retry system. AJ retries don't appear in the
    # Sidekiq UI Retries tab, don't save error data, can't be manually
    # retried, and don't automatically die.
    #
    #   class SomeJob < ActiveJob::Base
    #     queue_as :default
    #     sidekiq_options retry: 3, backtrace: 10
    #     def perform
    #     end
    #   end
    initializer "sidekiq.active_job_integration" do
      ActiveSupport.on_load(:active_job) do
        include ::Sidekiq::Worker::Options unless respond_to?(:sidekiq_options)
      end
    end

    # This hook fires after all initializers run, just before control returns
    # from config/environment.rb back to sidekiq/cli.rb. The reloader must be
    # installed after initialization so we can see whether cache_classes was
    # turned on.
    #
    # None of this matters on the client-side, only within the Sidekiq
    # process itself.
    config.after_initialize do
      Sidekiq.configure_server do |_|
        Sidekiq.options[:reloader] = Sidekiq::Rails::Reloader.new
      end
    end

    # Wraps job execution in the Rails application's code reloader so
    # freshly-edited code is picked up in development mode.
    class Reloader
      def initialize(app = ::Rails.application)
        @app = app
      end

      def call
        @app.reloader.wrap { yield }
      end

      def inspect
        "#<Sidekiq::Rails::Reloader @app=#{@app.class.name}>"
      end
    end
  end
end
@@ -0,0 +1,141 @@
|
|
1
|
+
# frozen_string_literal: true

require "connection_pool"
require "redis"
require "uri"

module Sidekiq
  class RedisConnection
    class << self
      # Build a ConnectionPool of Redis clients from +options+.
      # Accepts string or symbol keys. Recognized keys: :size, :url,
      # :namespace, :network_timeout, :pool_timeout, :id, plus anything
      # redis-rb understands. Returns a lazy ConnectionPool.
      def create(options = {})
        # Normalize all keys to symbols so the lookups below are consistent.
        options.keys.each do |key|
          options[key.to_sym] = options.delete(key)
        end

        options[:id] = "Sidekiq-#{Sidekiq.server? ? "server" : "client"}-PID-#{::Process.pid}" unless options.key?(:id)
        options[:url] ||= determine_redis_provider

        size = if options[:size]
          options[:size]
        elsif Sidekiq.server?
          # Give ourselves plenty of connections. pool is lazy
          # so we won't create them until we need them.
          Sidekiq.options[:concurrency] + 5
        elsif ENV["RAILS_MAX_THREADS"]
          Integer(ENV["RAILS_MAX_THREADS"])
        else
          5
        end

        verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?

        pool_timeout = options[:pool_timeout] || 1
        log_info(options)

        ConnectionPool.new(timeout: pool_timeout, size: size) do
          build_client(options)
        end
      end

      private

      # Sidekiq needs a lot of concurrent Redis connections.
      #
      # We need a connection for each Processor.
      # We need a connection for Pro's real-time change listener
      # We need a connection to various features to call Redis every few seconds:
      #   - the process heartbeat.
      #   - enterprise's leader election
      #   - enterprise's cron support
      def verify_sizing(size, concurrency)
        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size < (concurrency + 2)
      end

      # Instantiate a Redis client, wrapped in Redis::Namespace when a
      # :namespace option is present. Exits the process if the
      # redis-namespace gem is requested but not installed.
      def build_client(options)
        namespace = options[:namespace]

        client = Redis.new client_opts(options)
        if namespace
          begin
            require "redis/namespace"
            Redis::Namespace.new(namespace, redis: client)
          rescue LoadError
            # BUGFIX: the two literals below were joined by a "\" line
            # continuation with no separator, producing "...Gemfile.Add the
            # gem..." in the log output; a trailing space fixes the message.
            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile. " \
              "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
            exit(-127)
          end
        else
          client
        end
      end

      # Translate Sidekiq-level options into the option hash redis-rb expects.
      def client_opts(options)
        opts = options.dup
        if opts[:namespace]
          opts.delete(:namespace)
        end

        if opts[:network_timeout]
          opts[:timeout] = opts[:network_timeout]
          opts.delete(:network_timeout)
        end

        opts[:driver] ||= Redis::Connection.drivers.last || "ruby"

        # Issue #3303, redis-rb will silently retry an operation.
        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
        # is performed twice but I believe this is much, much rarer
        # than the reconnect silently fixing a problem; we keep it
        # on by default.
        opts[:reconnect_attempts] ||= 1

        opts
      end

      # Log the connection options at boot, with any AUTH password redacted
      # from the URL, the :password option, and each sentinel entry.
      def log_info(options)
        # Don't log Redis AUTH password
        redacted = "REDACTED"
        scrubbed_options = options.dup
        if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
          uri.password = redacted
          scrubbed_options[:url] = uri.to_s
        end
        if scrubbed_options[:password]
          scrubbed_options[:password] = redacted
        end
        scrubbed_options[:sentinels]&.each do |sentinel|
          sentinel[:password] = redacted if sentinel[:password]
        end
        if Sidekiq.server?
          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with redis options #{scrubbed_options}")
        else
          Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}")
        end
      end

      # Resolve the Redis URL from the environment. REDIS_PROVIDER names the
      # env var holding the URL (useful on Heroku); otherwise REDIS_URL is
      # used directly.
      def determine_redis_provider
        # If you have this in your environment:
        # MY_REDIS_URL=redis://hostname.example.com:1238/4
        # then set:
        # REDIS_PROVIDER=MY_REDIS_URL
        # and Sidekiq will find your custom URL variable with no custom
        # initialization code at all.
        #
        p = ENV["REDIS_PROVIDER"]
        if p && p =~ /:/
          raise <<~EOM
            REDIS_PROVIDER should be set to the name of the variable which contains the Redis URL, not a URL itself.
            Platforms like Heroku will sell addons that publish a *_URL variable. You need to tell Sidekiq with REDIS_PROVIDER, e.g.:

            REDISTOGO_URL=redis://somehost.example.com:6379/4
            REDIS_PROVIDER=REDISTOGO_URL
          EOM
        end

        ENV[p || "REDIS_URL"]
      end
    end
  end
end
@@ -0,0 +1,173 @@
|
|
1
|
+
# frozen_string_literal: true

require "sidekiq"
require "sidekiq/util"
require "sidekiq/api"

module Sidekiq
  module Scheduled
    SETS = %w[retry schedule]

    class Enq
      # Move every job in the retry/schedule sorted sets whose score (the
      # time it should run) is <= +now+ back onto its work queue.
      def enqueue_jobs(now = Time.now.to_f.to_s, sorted_sets = SETS)
        # A job's "score" in Redis is the time at which it should be processed.
        # Just check Redis for the set of jobs with a timestamp before now.
        Sidekiq.redis do |conn|
          sorted_sets.each do |sorted_set|
            # Get next items in the queue with scores (time to execute) <= now.
            until (jobs = conn.zrangebyscore(sorted_set, "-inf", now, limit: [0, 100])).empty?
              # Walk the list one job at a time to reduce the risk of losing
              # jobs between popping them from the scheduled set and pushing
              # them onto a work queue.
              jobs.each do |job|
                # ZREM returns false when another process already popped this
                # job; in that case just move on to the next one.
                next unless conn.zrem(sorted_set, job)

                Sidekiq::Client.push(Sidekiq.load_json(job))
                Sidekiq.logger.debug { "enqueued #{sorted_set}: #{job}" }
              end
            end
          end
        end
      end
    end

    ##
    # The Poller checks Redis every N seconds for jobs in the retry or
    # scheduled set that have passed their timestamp and should be enqueued.
    # If so, it just pops the job back onto its original queue so the
    # workers can pick it up like any other job.
    class Poller
      include Util

      INITIAL_WAIT = 10

      def initialize
        @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
        @sleeper = ConnectionPool::TimedStack.new
        @done = false
        @thread = nil
      end

      # Shut down this instance, will pause until the thread is dead.
      def terminate
        @done = true
        return unless @thread

        t = @thread
        @thread = nil
        @sleeper << 0
        t.value
      end

      def start
        @thread ||= safe_thread("scheduler") {
          initial_wait

          until @done
            enqueue
            wait
          end
          Sidekiq.logger.info("Scheduler exiting...")
        }
      end

      def enqueue
        @enq.enqueue_jobs
      rescue => ex
        # Most likely a problem with redis networking.
        # Punt and try again at the next interval
        logger.error ex.message
        handle_exception(ex)
      end

      private

      def wait
        @sleeper.pop(random_poll_interval)
      rescue Timeout::Error
        # expected
      rescue => ex
        # if poll_interval_average hasn't been calculated yet, we can
        # raise an error trying to reach Redis.
        logger.error ex.message
        handle_exception(ex)
        sleep 5
      end

      def random_poll_interval
        # We want one Sidekiq process to schedule jobs every N seconds. We
        # have M processes and **don't** want to coordinate, so within an N*M
        # second timespan each process sleeps a random amount, then wakes up
        # and schedules.
        #
        # Edge case: with 2 processes scheduling every 5 seconds (N*M = 10),
        # both could randomly sleep 8 seconds and miss the 5 second average.
        # Each cycle re-randomizes, so a later iteration can undercut the
        # average again — but for small clusters we bias sleeps toward the
        # average anyway. In the example each process should schedule every
        # 10 seconds on average, so small clusters get +50% and sleep between
        # 5 and 15 seconds. With more processes the natural spread between 0
        # and the poll interval makes this artificial boost unnecessary.
        if process_count < 10
          # For small clusters, calculate a random interval that is ±50% the desired average.
          poll_interval_average * rand + poll_interval_average.to_f / 2
        else
          # With 10+ processes, we should have enough randomness to get decent polling
          # across the entire timespan
          poll_interval_average * rand
        end
      end

      # We do our best to tune the poll interval to the size of the active
      # Sidekiq cluster. If you have 30 processes and poll every 15 seconds,
      # that means one Sidekiq is checking Redis every 0.5 seconds — way too
      # often for most people and really bad if the retry or scheduled sets
      # are large.
      #
      # Instead try to avoid polling more than once every 15 seconds. With 30
      # Sidekiq processes, we'll poll every 30 * 15 or 450 seconds. To keep
      # things statistically random, each poll sleeps a random amount between
      # 225 and 675 seconds (450 on average); otherwise restarting all your
      # Sidekiq processes at once would have them all polling at the same
      # time: the thundering herd problem.
      #
      # We only do this if poll_interval_average is unset (the default).
      def poll_interval_average
        Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
      end

      # Calculates an average poll interval based on the number of known
      # Sidekiq processes. This minimizes a single point of failure by
      # dispersing check-ins without taxing Redis when you run many processes.
      def scaled_poll_interval
        process_count * Sidekiq.options[:average_scheduled_poll_interval]
      end

      def process_count
        count = Sidekiq::ProcessSet.new.size
        count.zero? ? 1 : count
      end

      def initial_wait
        # Have all processes sleep between 5-15 seconds: 10 seconds to give
        # the heartbeat time to register (if the poll interval will be
        # derived from the number of workers), plus up to 5 random seconds
        # so they don't all hit Redis at the same time.
        total = 0
        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
        total += (5 * rand)

        @sleeper.pop(total)
      rescue Timeout::Error
      end
    end
  end
end
@@ -0,0 +1,344 @@
|
|
1
|
+
# frozen_string_literal: true

require "securerandom"
require "sidekiq"

module Sidekiq
  class Testing
    class << self
      attr_accessor :__test_mode

      # Switch the test mode, either permanently or — when a block is
      # given — only for the duration of the block, restoring the prior
      # mode afterwards.
      def __set_test_mode(mode)
        if block_given?
          prior = __test_mode
          begin
            self.__test_mode = mode
            yield
          ensure
            self.__test_mode = prior
          end
        else
          self.__test_mode = mode
        end
      end

      def disable!(&block)
        __set_test_mode(:disable, &block)
      end

      def fake!(&block)
        __set_test_mode(:fake, &block)
      end

      def inline!(&block)
        __set_test_mode(:inline, &block)
      end

      def enabled?
        __test_mode != :disable
      end

      def disabled?
        __test_mode == :disable
      end

      def fake?
        __test_mode == :fake
      end

      def inline?
        __test_mode == :inline
      end

      # Server middleware chain used when executing jobs under test.
      # Yields the chain when a block is given so callers can configure it.
      def server_middleware
        @server_chain ||= Middleware::Chain.new
        yield @server_chain if block_given?
        @server_chain
      end

      # Resolve a fully-qualified constant name ("A::B::C") to the constant.
      def constantize(str)
        names = str.split("::")
        names.shift if names.empty? || names.first.empty?

        names.inject(Object) do |constant, name|
          constant.const_defined?(name) ? constant.const_get(name) : constant.const_missing(name)
        end
      end
    end
  end

  # Default to fake testing to keep old behavior
  Sidekiq::Testing.fake!

  class EmptyQueueError < RuntimeError; end

  # Prepended onto Sidekiq::Client so pushes are diverted according to the
  # current test mode: stored in Queues (fake), executed immediately
  # (inline), or passed through to Redis (disabled).
  module TestingClient
    def raw_push(payloads)
      if Sidekiq::Testing.fake?
        payloads.each do |job|
          job = Sidekiq.load_json(Sidekiq.dump_json(job))
          job["enqueued_at"] = Time.now.to_f unless job["at"]
          Queues.push(job["queue"], job["class"], job)
        end
        true
      elsif Sidekiq::Testing.inline?
        payloads.each do |job|
          klass = Sidekiq::Testing.constantize(job["class"])
          job["id"] ||= SecureRandom.hex(12)
          job_hash = Sidekiq.load_json(Sidekiq.dump_json(job))
          klass.process_job(job_hash)
        end
        true
      else
        super
      end
    end
  end

  Sidekiq::Client.prepend TestingClient

  module Queues
    ##
    # The Queues class is only for testing the fake queue implementation.
    # There are 2 data structures involved in tandem. This is due to the
    # Rspec syntax of change(QueueWorker.jobs, :size). It keeps a reference
    # to the array. Because the array was derived from a filter of the total
    # jobs enqueued, it appeared as though the array didn't change.
    #
    # To solve this, we keep 2 hashes containing the jobs: one keyed by
    # queue name and another keyed by worker class name, so the array for
    # QueueWorker.jobs is a straight reference to a real array.
    #
    # Example:
    #
    #   require 'sidekiq/testing'
    #
    #   assert_equal 0, Sidekiq::Queues["default"].size
    #   HardWorker.perform_async(:something)
    #   assert_equal 1, Sidekiq::Queues["default"].size
    #   assert_equal :something, Sidekiq::Queues["default"].first['args'][0]
    #
    # You can also clear all workers' jobs, which is useful to make sure
    # jobs don't linger between tests:
    #
    #   RSpec.configure do |config|
    #     config.before(:each) do
    #       Sidekiq::Queues.clear_all
    #     end
    #   end
    #
    class << self
      def [](queue)
        jobs_by_queue[queue]
      end

      def push(queue, klass, job)
        jobs_by_queue[queue] << job
        jobs_by_worker[klass] << job
      end

      def jobs_by_queue
        @jobs_by_queue ||= Hash.new { |hash, key| hash[key] = [] }
      end

      def jobs_by_worker
        @jobs_by_worker ||= Hash.new { |hash, key| hash[key] = [] }
      end

      def delete_for(jid, queue, klass)
        jobs_by_queue[queue.to_s].delete_if { |job| job["jid"] == jid }
        jobs_by_worker[klass].delete_if { |job| job["jid"] == jid }
      end

      def clear_for(queue, klass)
        jobs_by_queue[queue].clear
        jobs_by_worker[klass].clear
      end

      def clear_all
        jobs_by_queue.clear
        jobs_by_worker.clear
      end
    end
  end

  module Worker
    ##
    # The Sidekiq testing infrastructure overrides perform_async
    # so that it does not actually touch the network. Instead it
    # stores the asynchronous jobs in a per-class array so that
    # their presence/absence can be asserted by your tests.
    #
    # This is similar to ActionMailer's :test delivery_method and its
    # ActionMailer::Base.deliveries array.
    #
    # Example:
    #
    #   require 'sidekiq/testing'
    #
    #   assert_equal 0, HardWorker.jobs.size
    #   HardWorker.perform_async(:something)
    #   assert_equal 1, HardWorker.jobs.size
    #   assert_equal :something, HardWorker.jobs[0]['args'][0]
    #
    # You can also clear and drain all workers' jobs:
    #
    #   Sidekiq::Worker.clear_all # or .drain_all
    #
    # This can be useful to make sure jobs don't linger between tests:
    #
    #   RSpec.configure do |config|
    #     config.before(:each) do
    #       Sidekiq::Worker.clear_all
    #     end
    #   end
    #
    # or for acceptance testing, i.e. with cucumber:
    #
    #   AfterStep do
    #     Sidekiq::Worker.drain_all
    #   end
    #
    #   When I sign up as "foo@example.com"
    #   Then I should receive a welcome email to "foo@example.com"
    #
    module ClassMethods
      # Queue for this worker
      def queue
        get_sidekiq_options["queue"]
      end

      # Jobs queued for this worker
      def jobs
        Queues.jobs_by_worker[to_s]
      end

      # Clear all jobs for this worker
      def clear
        Queues.clear_for(queue, to_s)
      end

      # Drain and run all jobs for this worker
      def drain
        while jobs.any?
          next_job = jobs.first
          Queues.delete_for(next_job["jid"], next_job["queue"], to_s)
          process_job(next_job)
        end
      end

      # Pop out a single job and perform it
      def perform_one
        raise(EmptyQueueError, "perform_one called with empty job queue") if jobs.empty?
        next_job = jobs.first
        Queues.delete_for(next_job["jid"], queue, to_s)
        process_job(next_job)
      end

      # Instantiate the worker and execute +job+ through the test server
      # middleware chain.
      def process_job(job)
        worker = new
        worker.jid = job["jid"]
        worker.bid = job["bid"] if worker.respond_to?(:bid=)
        Sidekiq::Testing.server_middleware.invoke(worker, job, job["queue"]) do
          execute_job(worker, job["args"])
        end
      end

      def execute_job(worker, args)
        worker.perform(*args)
      end
    end

    class << self
      def jobs # :nodoc:
        Queues.jobs_by_queue.values.flatten
      end

      # Clear all queued jobs across all workers
      def clear_all
        Queues.clear_all
      end

      # Drain all queued jobs across all workers
      def drain_all
        while jobs.any?
          worker_classes = jobs.map { |job| job["class"] }.uniq

          worker_classes.each do |worker_class|
            Sidekiq::Testing.constantize(worker_class).drain
          end
        end
      end
    end
  end

  module TestingExtensions
    def jobs_for(klass)
      jobs.select do |job|
        marshalled = job["args"][0]
        # NOTE(review): YAML.load on serialized delay-extension args;
        # presumably safe because the payload is produced by Sidekiq's own
        # extensions, but worth confirming against untrusted job sources.
        marshalled.index(klass.to_s) && YAML.load(marshalled)[0] == klass
      end
    end
  end

  Sidekiq::Extensions::DelayedMailer.extend(TestingExtensions) if defined?(Sidekiq::Extensions::DelayedMailer)
  Sidekiq::Extensions::DelayedModel.extend(TestingExtensions) if defined?(Sidekiq::Extensions::DelayedModel)
end

if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test?
  puts("**************************************************")
  puts("⛔️ WARNING: Sidekiq testing API enabled, but this is not the test environment. Your jobs will not go to Redis.")
  puts("**************************************************")
end