sidekiq_cleaner 5.3.6
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.circleci/config.yml +61 -0
- data/.github/contributing.md +32 -0
- data/.github/issue_template.md +11 -0
- data/.gitignore +15 -0
- data/.travis.yml +11 -0
- data/3.0-Upgrade.md +70 -0
- data/4.0-Upgrade.md +53 -0
- data/5.0-Upgrade.md +56 -0
- data/COMM-LICENSE +97 -0
- data/Changes.md +1536 -0
- data/Ent-Changes.md +238 -0
- data/Gemfile +23 -0
- data/LICENSE +9 -0
- data/Pro-2.0-Upgrade.md +138 -0
- data/Pro-3.0-Upgrade.md +44 -0
- data/Pro-4.0-Upgrade.md +35 -0
- data/Pro-Changes.md +759 -0
- data/README.md +55 -0
- data/Rakefile +9 -0
- data/bin/sidekiq +18 -0
- data/bin/sidekiqctl +20 -0
- data/bin/sidekiqload +149 -0
- data/cleaner/assets/images/favicon.ico +0 -0
- data/cleaner/assets/images/logo.png +0 -0
- data/cleaner/assets/images/status.png +0 -0
- data/cleaner/assets/javascripts/application.js +172 -0
- data/cleaner/assets/javascripts/dashboard.js +315 -0
- data/cleaner/assets/stylesheets/application-rtl.css +246 -0
- data/cleaner/assets/stylesheets/application.css +1144 -0
- data/cleaner/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/cleaner/assets/stylesheets/bootstrap.css +5 -0
- data/cleaner/locales/ar.yml +81 -0
- data/cleaner/locales/cs.yml +78 -0
- data/cleaner/locales/da.yml +68 -0
- data/cleaner/locales/de.yml +69 -0
- data/cleaner/locales/el.yml +68 -0
- data/cleaner/locales/en.yml +81 -0
- data/cleaner/locales/es.yml +70 -0
- data/cleaner/locales/fa.yml +80 -0
- data/cleaner/locales/fr.yml +78 -0
- data/cleaner/locales/he.yml +79 -0
- data/cleaner/locales/hi.yml +75 -0
- data/cleaner/locales/it.yml +69 -0
- data/cleaner/locales/ja.yml +80 -0
- data/cleaner/locales/ko.yml +68 -0
- data/cleaner/locales/nb.yml +77 -0
- data/cleaner/locales/nl.yml +68 -0
- data/cleaner/locales/pl.yml +59 -0
- data/cleaner/locales/pt-br.yml +68 -0
- data/cleaner/locales/pt.yml +67 -0
- data/cleaner/locales/ru.yml +78 -0
- data/cleaner/locales/sv.yml +68 -0
- data/cleaner/locales/ta.yml +75 -0
- data/cleaner/locales/uk.yml +76 -0
- data/cleaner/locales/ur.yml +80 -0
- data/cleaner/locales/zh-cn.yml +68 -0
- data/cleaner/locales/zh-tw.yml +68 -0
- data/cleaner/views/_footer.erb +20 -0
- data/cleaner/views/_job_info.erb +88 -0
- data/cleaner/views/_nav.erb +52 -0
- data/cleaner/views/_paging.erb +23 -0
- data/cleaner/views/_poll_link.erb +7 -0
- data/cleaner/views/_status.erb +4 -0
- data/cleaner/views/_summary.erb +40 -0
- data/cleaner/views/busy.erb +98 -0
- data/cleaner/views/dashboard.erb +75 -0
- data/cleaner/views/dead.erb +34 -0
- data/cleaner/views/errors.erb +84 -0
- data/cleaner/views/layout.erb +40 -0
- data/cleaner/views/morgue.erb +75 -0
- data/cleaner/views/queue.erb +46 -0
- data/cleaner/views/queues.erb +30 -0
- data/cleaner/views/retries.erb +80 -0
- data/cleaner/views/retry.erb +34 -0
- data/cleaner/views/scheduled.erb +54 -0
- data/cleaner/views/scheduled_job_info.erb +8 -0
- data/cleaner-stats.png +0 -0
- data/cleaner.png +0 -0
- data/code_of_conduct.md +50 -0
- data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
- data/lib/generators/sidekiq/worker_generator.rb +49 -0
- data/lib/sidekiq/api.rb +940 -0
- data/lib/sidekiq/cleaner/action.rb +89 -0
- data/lib/sidekiq/cleaner/application.rb +385 -0
- data/lib/sidekiq/cleaner/helpers.rb +325 -0
- data/lib/sidekiq/cleaner/router.rb +100 -0
- data/lib/sidekiq/cleaner.rb +214 -0
- data/lib/sidekiq/cli.rb +445 -0
- data/lib/sidekiq/client.rb +243 -0
- data/lib/sidekiq/core_ext.rb +1 -0
- data/lib/sidekiq/ctl.rb +221 -0
- data/lib/sidekiq/delay.rb +42 -0
- data/lib/sidekiq/exception_handler.rb +29 -0
- data/lib/sidekiq/extensions/action_mailer.rb +57 -0
- data/lib/sidekiq/extensions/active_record.rb +40 -0
- data/lib/sidekiq/extensions/class_methods.rb +40 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
- data/lib/sidekiq/fetch.rb +81 -0
- data/lib/sidekiq/job_logger.rb +25 -0
- data/lib/sidekiq/job_retry.rb +262 -0
- data/lib/sidekiq/launcher.rb +173 -0
- data/lib/sidekiq/logging.rb +122 -0
- data/lib/sidekiq/manager.rb +137 -0
- data/lib/sidekiq/middleware/chain.rb +150 -0
- data/lib/sidekiq/middleware/i18n.rb +42 -0
- data/lib/sidekiq/middleware/server/active_record.rb +23 -0
- data/lib/sidekiq/paginator.rb +43 -0
- data/lib/sidekiq/processor.rb +279 -0
- data/lib/sidekiq/rails.rb +58 -0
- data/lib/sidekiq/redis_connection.rb +144 -0
- data/lib/sidekiq/scheduled.rb +174 -0
- data/lib/sidekiq/testing/inline.rb +29 -0
- data/lib/sidekiq/testing.rb +333 -0
- data/lib/sidekiq/util.rb +66 -0
- data/lib/sidekiq/version.rb +4 -0
- data/lib/sidekiq/worker.rb +220 -0
- data/lib/sidekiq.rb +237 -0
- data/sidekiq_cleaner.gemspec +21 -0
- metadata +235 -0
@@ -0,0 +1,144 @@
|
|
1
|
+
# frozen_string_literal: true
require 'connection_pool'
require 'redis'
require 'uri'

module Sidekiq
  class RedisConnection
    class << self

      # Builds a ConnectionPool of Redis clients for this process.
      #
      # options - Hash of Redis/pool options; string keys are accepted and
      #           normalized to symbols. Recognized keys include :url, :size,
      #           :pool_timeout, :namespace and :network_timeout.
      #
      # Returns a ConnectionPool yielding Redis (or Redis::Namespace) clients.
      def create(options={})
        # Normalize all keys to symbols so callers may pass string keys
        # (e.g. from YAML config) interchangeably.
        options.keys.each do |key|
          options[key.to_sym] = options.delete(key)
        end

        options[:id] = "Sidekiq-#{Sidekiq.server? ? "server" : "client"}-PID-#{$$}" if !options.has_key?(:id)
        options[:url] ||= determine_redis_provider

        size = if options[:size]
                 options[:size]
               elsif Sidekiq.server?
                 # The server needs one connection per processor thread plus
                 # headroom for heartbeat/scheduler housekeeping.
                 Sidekiq.options[:concurrency] + 5
               elsif ENV['RAILS_MAX_THREADS']
                 Integer(ENV['RAILS_MAX_THREADS'])
               else
                 5
               end

        verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?

        pool_timeout = options[:pool_timeout] || 1
        log_info(options)

        ConnectionPool.new(:timeout => pool_timeout, :size => size) do
          build_client(options)
        end
      end

      private

      # Sidekiq needs a lot of concurrent Redis connections.
      #
      # We need a connection for each Processor.
      # We need a connection for Pro's real-time change listener
      # We need a connection to various features to call Redis every few seconds:
      # - the process heartbeat.
      # - enterprise's leader election
      # - enterprise's cron support
      def verify_sizing(size, concurrency)
        raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work. Your pool has #{size} connections but must have at least #{concurrency + 2}" if size <= concurrency
      end

      # Instantiates a raw Redis client, wrapping it in Redis::Namespace when
      # a :namespace option is given. Exits the process if the namespace is
      # requested but the redis-namespace gem is unavailable.
      def build_client(options)
        namespace = options[:namespace]

        client = Redis.new client_opts(options)
        if namespace
          begin
            require 'redis/namespace'
            Redis::Namespace.new(namespace, :redis => client)
          rescue LoadError
            # BUGFIX: the two message halves were concatenated without a
            # separating space ("...Gemfile.Add the gem...").
            Sidekiq.logger.error("Your Redis configuration uses the namespace '#{namespace}' but the redis-namespace gem is not included in the Gemfile. " \
              "Add the gem to your Gemfile to continue using a namespace. Otherwise, remove the namespace parameter.")
            exit(-127)
          end
        else
          client
        end
      end

      # Translates Sidekiq-level options into the option hash redis-rb expects:
      # strips :namespace (handled by build_client), maps :network_timeout to
      # redis-rb's :timeout, and fills in driver/reconnect defaults.
      def client_opts(options)
        opts = options.dup
        if opts[:namespace]
          opts.delete(:namespace)
        end

        if opts[:network_timeout]
          opts[:timeout] = opts[:network_timeout]
          opts.delete(:network_timeout)
        end

        opts[:driver] ||= Redis::Connection.drivers.last || 'ruby'

        # Issue #3303, redis-rb will silently retry an operation.
        # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
        # is performed twice but I believe this is much, much rarer
        # than the reconnect silently fixing a problem; we keep it
        # on by default.
        opts[:reconnect_attempts] ||= 1

        opts
      end

      # Logs the connection options at boot, scrubbing any password from both
      # the :url and :password options so credentials never reach the log.
      def log_info(options)
        # Don't log Redis AUTH password
        redacted = "REDACTED"
        scrubbed_options = options.dup
        if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
          uri.password = redacted
          scrubbed_options[:url] = uri.to_s
        end
        if scrubbed_options[:password]
          scrubbed_options[:password] = redacted
        end
        if Sidekiq.server?
          Sidekiq.logger.info("Booting Sidekiq #{Sidekiq::VERSION} with redis options #{scrubbed_options}")
        else
          Sidekiq.logger.debug("#{Sidekiq::NAME} client with redis options #{scrubbed_options}")
        end
      end

      # Resolves the Redis URL from the environment. REDIS_PROVIDER names the
      # environment variable that holds the URL; REDIS_URL is the fallback.
      def determine_redis_provider
        # If you have this in your environment:
        # MY_REDIS_URL=redis://hostname.example.com:1238/4
        # then set:
        # REDIS_PROVIDER=MY_REDIS_URL
        # and Sidekiq will find your custom URL variable with no custom
        # initialization code at all.
        p = ENV['REDIS_PROVIDER']
        if p && p =~ /\:/
          Sidekiq.logger.error <<-EOM

#################################################################################

  REDIS_PROVIDER should be set to the **name** of the variable which contains the Redis URL, not a URL itself.
  Platforms like Heroku sell addons that publish a *_URL variable.  You tell Sidekiq with REDIS_PROVIDER, e.g.:

  REDIS_PROVIDER=REDISTOGO_URL
  REDISTOGO_URL=redis://somehost.example.com:6379/4

  Use REDIS_URL if you wish to point Sidekiq to a URL directly.

  This configuration error will crash starting in Sidekiq 5.3.

#################################################################################
          EOM
        end
        ENV[
          ENV['REDIS_PROVIDER'] || 'REDIS_URL'
        ]
      end

    end
  end
end
|
@@ -0,0 +1,174 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
require 'sidekiq'
|
3
|
+
require 'sidekiq/util'
|
4
|
+
require 'sidekiq/api'
|
5
|
+
|
6
|
+
module Sidekiq
|
7
|
+
module Scheduled
|
8
|
+
SETS = %w(retry schedule)
|
9
|
+
|
10
|
+
class Enq
  # Moves jobs whose scheduled time has arrived from the given sorted sets
  # onto their work queues.
  def enqueue_jobs(now=Time.now.to_f.to_s, sorted_sets=SETS)
    # A job's "score" in Redis is the epoch time at which it should be
    # processed, so we simply ask Redis for entries scored at or before now.
    Sidekiq.redis do |conn|
      sorted_sets.each do |sorted_set|
        # Pull entries one at a time: this narrows the window between reading
        # a job from the scheduled set and pushing it onto a work queue,
        # reducing the chance of losing jobs if something fails in between.
        loop do
          job = conn.zrangebyscore(sorted_set, '-inf', now, :limit => [0, 1]).first
          break unless job

          # ZREM acts as the claim: only the process that successfully removes
          # the entry enqueues it, so concurrent schedulers can't double-push.
          next unless conn.zrem(sorted_set, job)

          Sidekiq::Client.push(Sidekiq.load_json(job))
          Sidekiq::Logging.logger.debug { "enqueued #{sorted_set}: #{job}" }
        end
      end
    end
  end
end
|
34
|
+
|
35
|
+
##
|
36
|
+
# The Poller checks Redis every N seconds for jobs in the retry or scheduled
|
37
|
+
# set have passed their timestamp and should be enqueued. If so, it
|
38
|
+
# just pops the job back onto its original queue so the
|
39
|
+
# workers can pick it up like any other job.
|
40
|
+
class Poller
  include Util

  INITIAL_WAIT = 10

  def initialize
    @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
    @sleeper = ConnectionPool::TimedStack.new
    @done = false
    @thread = nil
  end

  # Shut down this instance, will pause until the thread is dead.
  def terminate
    @done = true
    return unless @thread

    poller_thread, @thread = @thread, nil
    # Unblock the sleeper so the poll loop notices @done immediately.
    @sleeper << 0
    poller_thread.value
  end

  # Spawns the background polling thread (idempotent).
  def start
    @thread ||= safe_thread("scheduler") do
      initial_wait

      until @done
        enqueue
        wait
      end
      Sidekiq.logger.info("Scheduler exiting...")
    end
  end

  # Runs one enqueue pass, swallowing errors so the poll loop survives.
  def enqueue
    @enq.enqueue_jobs
  rescue => ex
    # Most likely a problem with redis networking.
    # Punt and try again at the next interval
    logger.error ex.message
    handle_exception(ex)
  end

  private

  # Sleeps until the next poll, waking early if terminate pushes a token.
  def wait
    @sleeper.pop(random_poll_interval)
  rescue Timeout::Error
    # expected
  rescue => ex
    # if poll_interval_average hasn't been calculated yet, we can
    # raise an error trying to reach Redis.
    logger.error ex.message
    handle_exception(ex)
    sleep 5
  end

  def random_poll_interval
    # We want one Sidekiq process to schedule jobs every N seconds. We have M processes
    # and **don't** want to coordinate.
    #
    # So in N*M second timespan, we want each process to schedule once. The basic loop is:
    #
    # * sleep a random amount within that N*M timespan
    # * wake up and schedule
    #
    # We want to avoid one edge case: imagine a set of 2 processes, scheduling every 5 seconds,
    # so N*M = 10. Each process decides to randomly sleep 8 seconds, now we've failed to meet
    # that 5 second average. Thankfully each schedule cycle will sleep randomly so the next
    # iteration could see each process sleep for 1 second, undercutting our average.
    #
    # So below 10 processes, we special case and ensure the processes sleep closer to the average.
    # In the example above, each process should schedule every 10 seconds on average. We special
    # case smaller clusters to add 50% so they would sleep somewhere between 5 and 15 seconds.
    # As we run more processes, the scheduling interval average will approach an even spread
    # between 0 and poll interval so we don't need this artificial boost.
    #
    average = poll_interval_average
    if process_count < 10
      # For small clusters, calculate a random interval that is ±50% the desired average.
      average * rand + average.to_f / 2
    else
      # With 10+ processes, we should have enough randomness to get decent polling
      # across the entire timespan
      average * rand
    end
  end

  # We do our best to tune the poll interval to the size of the active Sidekiq
  # cluster. If you have 30 processes and poll every 15 seconds, that means one
  # Sidekiq is checking Redis every 0.5 seconds - way too often for most people
  # and really bad if the retry or scheduled sets are large.
  #
  # Instead try to avoid polling more than once every 15 seconds. If you have
  # 30 Sidekiq processes, we'll poll every 30 * 15 or 450 seconds.
  # To keep things statistically random, we'll sleep a random amount between
  # 225 and 675 seconds for each poll or 450 seconds on average. Otherwise restarting
  # all your Sidekiq processes at the same time will lead to them all polling at
  # the same time: the thundering herd problem.
  #
  # We only do this if poll_interval_average is unset (the default).
  def poll_interval_average
    Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
  end

  # Calculates an average poll interval based on the number of known Sidekiq processes.
  # This minimizes a single point of failure by dispersing check-ins but without taxing
  # Redis if you run many Sidekiq processes.
  def scaled_poll_interval
    process_count * Sidekiq.options[:average_scheduled_poll_interval]
  end

  # Number of live Sidekiq processes, never less than 1.
  def process_count
    count = Sidekiq::ProcessSet.new.size
    count.zero? ? 1 : count
  end

  def initial_wait
    # Have all processes sleep between 5-15 seconds. 10 seconds
    # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number
    # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
    base = Sidekiq.options[:poll_interval_average] ? 0 : INITIAL_WAIT
    @sleeper.pop(base + (5 * rand))
  rescue Timeout::Error
  end

end
|
173
|
+
end
|
174
|
+
end
|
@@ -0,0 +1,29 @@
|
|
1
|
+
# frozen_string_literal: true
require 'sidekiq/testing'

##
# Loading this file switches Sidekiq's testing mode to "inline": calls to
# perform_async execute the worker's perform method immediately, in-process,
# rather than enqueueing a job. Useful for test environments that want
# synchronous execution, comparable to `Resque.inline = true`.
#
# Example:
#
#   require 'sidekiq/testing/inline'
#
#   $external_variable = 0
#
#   class ExternalWorker
#     include Sidekiq::Worker
#
#     def perform
#       $external_variable = 1
#     end
#   end
#
#   assert_equal 0, $external_variable
#   ExternalWorker.perform_async
#   assert_equal 1, $external_variable
#
Sidekiq::Testing.inline!
|
@@ -0,0 +1,333 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
require 'securerandom'
|
3
|
+
require 'sidekiq'
|
4
|
+
|
5
|
+
module Sidekiq
|
6
|
+
|
7
|
+
class Testing
  class << self
    attr_accessor :__test_mode

    # Switches the global test mode (:disable, :fake or :inline). When given
    # a block, the mode applies only for the block's duration and the
    # previous mode is restored afterwards, even on error.
    def __set_test_mode(mode)
      unless block_given?
        self.__test_mode = mode
        return
      end

      previous_mode = self.__test_mode
      begin
        self.__test_mode = mode
        yield
      ensure
        self.__test_mode = previous_mode
      end
    end

    # Route jobs to real Redis (optionally only within the block).
    def disable!(&block)
      __set_test_mode(:disable, &block)
    end

    # Buffer jobs in-memory instead of pushing to Redis.
    def fake!(&block)
      __set_test_mode(:fake, &block)
    end

    # Execute jobs synchronously as they are enqueued.
    def inline!(&block)
      __set_test_mode(:inline, &block)
    end

    def enabled?
      __test_mode != :disable
    end

    def disabled?
      __test_mode == :disable
    end

    def fake?
      __test_mode == :fake
    end

    def inline?
      __test_mode == :inline
    end

    # Middleware chain run around jobs executed by the testing harness.
    # Yields the chain for configuration when a block is given.
    def server_middleware
      @server_chain ||= Middleware::Chain.new
      yield @server_chain if block_given?
      @server_chain
    end

    # Resolves a fully-qualified constant name such as "Foo::Bar" to the
    # constant itself, triggering const_missing (e.g. Rails autoloading)
    # for names that are not yet defined.
    def constantize(str)
      parts = str.split('::')
      parts.shift if parts.empty? || parts.first.empty?

      parts.inject(Object) do |namespace, part|
        namespace.const_defined?(part) ? namespace.const_get(part) : namespace.const_missing(part)
      end
    end
  end
end
|
69
|
+
|
70
|
+
# Default to fake testing to keep old behavior
|
71
|
+
Sidekiq::Testing.fake!
|
72
|
+
|
73
|
+
class EmptyQueueError < RuntimeError; end
|
74
|
+
|
75
|
+
module TestingClient
  # Intercepts Sidekiq::Client#raw_push according to the active test mode:
  # fake mode buffers jobs in the in-memory Queues store, inline mode runs
  # them immediately, and disabled mode falls through to the real push.
  def raw_push(payloads)
    if Sidekiq::Testing.fake?
      payloads.each do |job|
        # Round-trip through JSON so the stored job matches what a real
        # Redis push would have serialized.
        job = Sidekiq.load_json(Sidekiq.dump_json(job))
        job.merge!('enqueued_at' => Time.now.to_f) unless job['at']
        Queues.push(job['queue'], job['class'], job)
      end
      return true
    end

    return super unless Sidekiq::Testing.inline?

    payloads.each do |job|
      klass = Sidekiq::Testing.constantize(job['class'])
      job['id'] ||= SecureRandom.hex(12)
      job_hash = Sidekiq.load_json(Sidekiq.dump_json(job))
      klass.process_job(job_hash)
    end
    true
  end
end
|
97
|
+
|
98
|
+
Sidekiq::Client.prepend TestingClient
|
99
|
+
|
100
|
+
module Queues
  ##
  # The Queues class is only for testing the fake queue implementation.
  # There are 2 data structures involved in tandem. This is due to the
  # Rspec syntax of change(QueueWorker.jobs, :size). It keeps a reference
  # to the array. Because the array was derived from a filter of the total
  # jobs enqueued, it appeared as though the array didn't change.
  #
  # To solve this, we'll keep 2 hashes containing the jobs. One with keys based
  # on the queue, and another with keys of the worker names, so the array for
  # QueueWorker.jobs is a straight reference to a real array.
  #
  # Queue-based hash:
  #
  # {
  #   "default"=>[
  #     {
  #       "class"=>"TestTesting::QueueWorker",
  #       "args"=>[1, 2],
  #       "retry"=>true,
  #       "queue"=>"default",
  #       "jid"=>"abc5b065c5c4b27fc1102833",
  #       "created_at"=>1447445554.419934
  #     }
  #   ]
  # }
  #
  # Worker-based hash:
  #
  # {
  #   "TestTesting::QueueWorker"=>[
  #     {
  #       "class"=>"TestTesting::QueueWorker",
  #       "args"=>[1, 2],
  #       "retry"=>true,
  #       "queue"=>"default",
  #       "jid"=>"abc5b065c5c4b27fc1102833",
  #       "created_at"=>1447445554.419934
  #     }
  #   ]
  # }
  #
  # Example:
  #
  #   require 'sidekiq/testing'
  #
  #   assert_equal 0, Sidekiq::Queues["default"].size
  #   HardWorker.perform_async(:something)
  #   assert_equal 1, Sidekiq::Queues["default"].size
  #   assert_equal :something, Sidekiq::Queues["default"].first['args'][0]
  #
  # You can also clear all workers' jobs:
  #
  #   assert_equal 0, Sidekiq::Queues["default"].size
  #   HardWorker.perform_async(:something)
  #   Sidekiq::Queues.clear_all
  #   assert_equal 0, Sidekiq::Queues["default"].size
  #
  # This can be useful to make sure jobs don't linger between tests:
  #
  #   RSpec.configure do |config|
  #     config.before(:each) do
  #       Sidekiq::Queues.clear_all
  #     end
  #   end
  #
  class << self
    # Jobs currently buffered for the named queue.
    def [](queue)
      jobs_by_queue[queue]
    end

    # Record a job under both indexes (by queue and by worker class name).
    def push(queue, klass, job)
      jobs_by_queue[queue] << job
      jobs_by_worker[klass] << job
    end

    # queue name => Array of job hashes; missing keys get a fresh array.
    def jobs_by_queue
      @jobs_by_queue ||= Hash.new { |store, queue_name| store[queue_name] = [] }
    end

    # worker class name => Array of job hashes; missing keys get a fresh array.
    def jobs_by_worker
      @jobs_by_worker ||= Hash.new { |store, worker_name| store[worker_name] = [] }
    end

    # Remove the job with the given jid from both indexes.
    def delete_for(jid, queue, klass)
      jobs_by_queue[queue.to_s].delete_if { |candidate| candidate["jid"] == jid }
      jobs_by_worker[klass].delete_if { |candidate| candidate["jid"] == jid }
    end

    # Empty the buffered jobs for one queue/worker pair, keeping the arrays
    # (RSpec matchers may hold references to them).
    def clear_for(queue, klass)
      jobs_by_queue[queue].clear
      jobs_by_worker[klass].clear
    end

    # Drop all buffered jobs across every queue and worker.
    def clear_all
      jobs_by_queue.clear
      jobs_by_worker.clear
    end
  end
end
|
200
|
+
|
201
|
+
module Worker
  ##
  # The Sidekiq testing infrastructure overrides perform_async
  # so that it does not actually touch the network. Instead it
  # stores the asynchronous jobs in a per-class array so that
  # their presence/absence can be asserted by your tests.
  #
  # This is similar to ActionMailer's :test delivery_method and its
  # ActionMailer::Base.deliveries array.
  #
  # Example:
  #
  #   require 'sidekiq/testing'
  #
  #   assert_equal 0, HardWorker.jobs.size
  #   HardWorker.perform_async(:something)
  #   assert_equal 1, HardWorker.jobs.size
  #   assert_equal :something, HardWorker.jobs[0]['args'][0]
  #
  #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
  #   MyMailer.delay.send_welcome_email('foo@example.com')
  #   assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
  #
  # You can also clear and drain all workers' jobs:
  #
  #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
  #   assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size
  #
  #   MyMailer.delay.send_welcome_email('foo@example.com')
  #   MyModel.delay.do_something_hard
  #
  #   assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
  #   assert_equal 1, Sidekiq::Extensions::DelayedModel.jobs.size
  #
  #   Sidekiq::Worker.clear_all # or .drain_all
  #
  #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
  #   assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size
  #
  # This can be useful to make sure jobs don't linger between tests:
  #
  #   RSpec.configure do |config|
  #     config.before(:each) do
  #       Sidekiq::Worker.clear_all
  #     end
  #   end
  #
  # or for acceptance testing, i.e. with cucumber:
  #
  #   AfterStep do
  #     Sidekiq::Worker.drain_all
  #   end
  #
  #   When I sign up as "foo@example.com"
  #   Then I should receive a welcome email to "foo@example.com"
  #
  module ClassMethods

    # Queue for this worker
    def queue
      self.sidekiq_options["queue"]
    end

    # Jobs queued for this worker
    def jobs
      Queues.jobs_by_worker[self.to_s]
    end

    # Clear all jobs for this worker
    def clear
      Queues.clear_for(queue, self.to_s)
    end

    # Drain and run all jobs for this worker
    def drain
      until jobs.empty?
        head = jobs.first
        Queues.delete_for(head["jid"], head["queue"], self.to_s)
        process_job(head)
      end
    end

    # Pop out a single job and perform it
    def perform_one
      raise(EmptyQueueError, "perform_one called with empty job queue") if jobs.empty?
      head = jobs.first
      Queues.delete_for(head["jid"], queue, self.to_s)
      process_job(head)
    end

    # Instantiate the worker and run the job through the testing server
    # middleware chain, mirroring what a real Sidekiq processor does.
    def process_job(job)
      instance = new
      instance.jid = job['jid']
      instance.bid = job['bid'] if instance.respond_to?(:bid=)
      Sidekiq::Testing.server_middleware.invoke(instance, job, job['queue']) do
        execute_job(instance, job['args'])
      end
    end

    def execute_job(worker, args)
      worker.perform(*args)
    end
  end

  class << self
    def jobs # :nodoc:
      Queues.jobs_by_queue.values.flatten
    end

    # Clear all queued jobs across all workers
    def clear_all
      Queues.clear_all
    end

    # Drain all queued jobs across all workers
    def drain_all
      until jobs.empty?
        pending_classes = jobs.map { |job| job["class"] }.uniq

        pending_classes.each do |worker_class|
          Sidekiq::Testing.constantize(worker_class).drain
        end
      end
    end
  end
end
|
327
|
+
end
|
328
|
+
|
329
|
+
# Warn loudly when the testing API is loaded outside the test environment:
# jobs will be buffered/executed in-process instead of reaching Redis.
if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test?
  banner = "**************************************************"
  puts(banner)
  puts("⛔️ WARNING: Sidekiq testing API enabled, but this is not the test environment.  Your jobs will not go to Redis.")
  puts(banner)
end
|