sidekiq 5.2.8
Potentially problematic release: this version of sidekiq has been flagged as potentially problematic.
- checksums.yaml +7 -0
- data/.circleci/config.yml +61 -0
- data/.github/contributing.md +32 -0
- data/.github/issue_template.md +11 -0
- data/.gitignore +15 -0
- data/.travis.yml +11 -0
- data/3.0-Upgrade.md +70 -0
- data/4.0-Upgrade.md +53 -0
- data/5.0-Upgrade.md +56 -0
- data/COMM-LICENSE +97 -0
- data/Changes.md +1542 -0
- data/Ent-Changes.md +238 -0
- data/Gemfile +23 -0
- data/LICENSE +9 -0
- data/Pro-2.0-Upgrade.md +138 -0
- data/Pro-3.0-Upgrade.md +44 -0
- data/Pro-4.0-Upgrade.md +35 -0
- data/Pro-Changes.md +759 -0
- data/README.md +109 -0
- data/Rakefile +9 -0
- data/bin/sidekiq +18 -0
- data/bin/sidekiqctl +20 -0
- data/bin/sidekiqload +149 -0
- data/code_of_conduct.md +50 -0
- data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
- data/lib/generators/sidekiq/worker_generator.rb +49 -0
- data/lib/sidekiq.rb +237 -0
- data/lib/sidekiq/api.rb +940 -0
- data/lib/sidekiq/cli.rb +445 -0
- data/lib/sidekiq/client.rb +243 -0
- data/lib/sidekiq/core_ext.rb +1 -0
- data/lib/sidekiq/ctl.rb +221 -0
- data/lib/sidekiq/delay.rb +42 -0
- data/lib/sidekiq/exception_handler.rb +29 -0
- data/lib/sidekiq/extensions/action_mailer.rb +57 -0
- data/lib/sidekiq/extensions/active_record.rb +40 -0
- data/lib/sidekiq/extensions/class_methods.rb +40 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
- data/lib/sidekiq/fetch.rb +81 -0
- data/lib/sidekiq/job_logger.rb +25 -0
- data/lib/sidekiq/job_retry.rb +262 -0
- data/lib/sidekiq/launcher.rb +173 -0
- data/lib/sidekiq/logging.rb +122 -0
- data/lib/sidekiq/manager.rb +137 -0
- data/lib/sidekiq/middleware/chain.rb +150 -0
- data/lib/sidekiq/middleware/i18n.rb +42 -0
- data/lib/sidekiq/middleware/server/active_record.rb +23 -0
- data/lib/sidekiq/paginator.rb +43 -0
- data/lib/sidekiq/processor.rb +279 -0
- data/lib/sidekiq/rails.rb +58 -0
- data/lib/sidekiq/redis_connection.rb +144 -0
- data/lib/sidekiq/scheduled.rb +174 -0
- data/lib/sidekiq/testing.rb +333 -0
- data/lib/sidekiq/testing/inline.rb +29 -0
- data/lib/sidekiq/util.rb +66 -0
- data/lib/sidekiq/version.rb +4 -0
- data/lib/sidekiq/web.rb +213 -0
- data/lib/sidekiq/web/action.rb +89 -0
- data/lib/sidekiq/web/application.rb +353 -0
- data/lib/sidekiq/web/helpers.rb +325 -0
- data/lib/sidekiq/web/router.rb +100 -0
- data/lib/sidekiq/worker.rb +220 -0
- data/sidekiq.gemspec +21 -0
- data/web/assets/images/favicon.ico +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status.png +0 -0
- data/web/assets/javascripts/application.js +92 -0
- data/web/assets/javascripts/dashboard.js +315 -0
- data/web/assets/stylesheets/application-rtl.css +246 -0
- data/web/assets/stylesheets/application.css +1144 -0
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +5 -0
- data/web/locales/ar.yml +81 -0
- data/web/locales/cs.yml +78 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +69 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +81 -0
- data/web/locales/es.yml +70 -0
- data/web/locales/fa.yml +80 -0
- data/web/locales/fr.yml +78 -0
- data/web/locales/he.yml +79 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +80 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/nb.yml +77 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +78 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/uk.yml +76 -0
- data/web/locales/ur.yml +80 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +20 -0
- data/web/views/_job_info.erb +88 -0
- data/web/views/_nav.erb +52 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +98 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +40 -0
- data/web/views/morgue.erb +75 -0
- data/web/views/queue.erb +46 -0
- data/web/views/queues.erb +30 -0
- data/web/views/retries.erb +80 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +54 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +230 -0
data/lib/sidekiq/scheduled.rb @@ -0,0 +1,174 @@

# frozen_string_literal: true
require 'sidekiq'
require 'sidekiq/util'
require 'sidekiq/api'

module Sidekiq
  module Scheduled
    SETS = %w(retry schedule)

    class Enq
      def enqueue_jobs(now=Time.now.to_f.to_s, sorted_sets=SETS)
        # A job's "score" in Redis is the time at which it should be processed.
        # Just check Redis for the set of jobs with a timestamp before now.
        Sidekiq.redis do |conn|
          sorted_sets.each do |sorted_set|
            # Get the next item in the queue if its score (time to execute) is <= now.
            # We need to go through the list one at a time to reduce the risk of something
            # going wrong between the time jobs are popped from the scheduled queue and when
            # they are pushed onto a work queue and losing the jobs.
            while job = conn.zrangebyscore(sorted_set, '-inf', now, :limit => [0, 1]).first do

              # Pop item off the queue and add it to the work queue. If the job can't be popped from
              # the queue, it's because another process already popped it so we can move on to the
              # next one.
              if conn.zrem(sorted_set, job)
                Sidekiq::Client.push(Sidekiq.load_json(job))
                Sidekiq::Logging.logger.debug { "enqueued #{sorted_set}: #{job}" }
              end
            end
          end
        end
      end
    end

    ##
    # The Poller checks Redis every N seconds for jobs in the retry or scheduled
    # set that have passed their timestamp and should be enqueued. If so, it
    # just pops the job back onto its original queue so the
    # workers can pick it up like any other job.
    class Poller
      include Util

      INITIAL_WAIT = 10

      def initialize
        @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
        @sleeper = ConnectionPool::TimedStack.new
        @done = false
        @thread = nil
      end

      # Shut down this instance, will pause until the thread is dead.
      def terminate
        @done = true
        if @thread
          t = @thread
          @thread = nil
          @sleeper << 0
          t.value
        end
      end

      def start
        @thread ||= safe_thread("scheduler") do
          initial_wait

          while !@done
            enqueue
            wait
          end
          Sidekiq.logger.info("Scheduler exiting...")
        end
      end

      def enqueue
        begin
          @enq.enqueue_jobs
        rescue => ex
          # Most likely a problem with redis networking.
          # Punt and try again at the next interval.
          logger.error ex.message
          handle_exception(ex)
        end
      end

      private

      def wait
        @sleeper.pop(random_poll_interval)
      rescue Timeout::Error
        # expected
      rescue => ex
        # if poll_interval_average hasn't been calculated yet, we can
        # raise an error trying to reach Redis.
        logger.error ex.message
        handle_exception(ex)
        sleep 5
      end

      def random_poll_interval
        # We want one Sidekiq process to schedule jobs every N seconds. We have M processes
        # and **don't** want to coordinate.
        #
        # So in an N*M second timespan, we want each process to schedule once. The basic loop is:
        #
        # * sleep a random amount within that N*M timespan
        # * wake up and schedule
        #
        # We want to avoid one edge case: imagine a set of 2 processes, scheduling every 5 seconds,
        # so N*M = 10. Each process decides to randomly sleep 8 seconds, now we've failed to meet
        # that 5 second average. Thankfully each schedule cycle will sleep randomly so the next
        # iteration could see each process sleep for 1 second, undercutting our average.
        #
        # So below 10 processes, we special case and ensure the processes sleep closer to the average.
        # In the example above, each process should schedule every 10 seconds on average. We special
        # case smaller clusters to add 50% so they would sleep somewhere between 5 and 15 seconds.
        # As we run more processes, the scheduling interval average will approach an even spread
        # between 0 and poll interval so we don't need this artificial boost.
        #
        if process_count < 10
          # For small clusters, calculate a random interval that is ±50% the desired average.
          poll_interval_average * rand + poll_interval_average.to_f / 2
        else
          # With 10+ processes, we should have enough randomness to get decent polling
          # across the entire timespan.
          poll_interval_average * rand
        end
      end

      # We do our best to tune the poll interval to the size of the active Sidekiq
      # cluster. If you have 30 processes and poll every 15 seconds, that means one
      # Sidekiq is checking Redis every 0.5 seconds - way too often for most people
      # and really bad if the retry or scheduled sets are large.
      #
      # Instead try to avoid polling more than once every 15 seconds. If you have
      # 30 Sidekiq processes, we'll poll every 30 * 15 or 450 seconds.
      # To keep things statistically random, we'll sleep a random amount between
      # 225 and 675 seconds for each poll or 450 seconds on average. Otherwise restarting
      # all your Sidekiq processes at the same time will lead to them all polling at
      # the same time: the thundering herd problem.
      #
      # We only do this if poll_interval_average is unset (the default).
      def poll_interval_average
        Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
      end

      # Calculates an average poll interval based on the number of known Sidekiq processes.
      # This minimizes a single point of failure by dispersing check-ins but without taxing
      # Redis if you run many Sidekiq processes.
      def scaled_poll_interval
        process_count * Sidekiq.options[:average_scheduled_poll_interval]
      end

      def process_count
        pcount = Sidekiq::ProcessSet.new.size
        pcount = 1 if pcount == 0
        pcount
      end

      def initial_wait
        # Have all processes sleep between 5-15 seconds: 10 seconds
        # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number
        # of workers), and up to 5 random seconds to ensure they don't all hit Redis at the same time.
        total = 0
        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
        total += (5 * rand)

        @sleeper.pop(total)
      rescue Timeout::Error
      end

    end
  end
end
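
To make the poller's interval math concrete, here is a small illustrative Ruby sketch. It is not part of the gem; it simply applies the same scaling rules as scaled_poll_interval and random_poll_interval to a few hypothetical cluster sizes, using the 15-second average mentioned in the comments above as an assumed input.

# Illustrative sketch only: shows how the scheduler's sleep time scales with
# cluster size. The process counts and the 15-second average are example inputs.
average_scheduled_poll_interval = 15 # seconds

[2, 5, 30].each do |process_count|
  # Scale the average poll interval by the number of processes in the cluster.
  poll_interval_average = process_count * average_scheduled_poll_interval

  sleep_time =
    if process_count < 10
      # Small cluster: random value within ±50% of the scaled average.
      poll_interval_average * rand + poll_interval_average / 2.0
    else
      # 10+ processes: uniform between 0 and the scaled average.
      poll_interval_average * rand
    end

  puts "#{process_count} processes: average #{poll_interval_average}s, this cycle sleeps ~#{sleep_time.round(1)}s"
end

Across the whole cluster this keeps the effective polling rate near one check every 15 seconds regardless of how many processes are running, while the randomness avoids a thundering herd after a mass restart.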
data/lib/sidekiq/testing.rb @@ -0,0 +1,333 @@

# frozen_string_literal: true
require 'securerandom'
require 'sidekiq'

module Sidekiq

  class Testing
    class << self
      attr_accessor :__test_mode

      def __set_test_mode(mode)
        if block_given?
          current_mode = self.__test_mode
          begin
            self.__test_mode = mode
            yield
          ensure
            self.__test_mode = current_mode
          end
        else
          self.__test_mode = mode
        end
      end

      def disable!(&block)
        __set_test_mode(:disable, &block)
      end

      def fake!(&block)
        __set_test_mode(:fake, &block)
      end

      def inline!(&block)
        __set_test_mode(:inline, &block)
      end

      def enabled?
        self.__test_mode != :disable
      end

      def disabled?
        self.__test_mode == :disable
      end

      def fake?
        self.__test_mode == :fake
      end

      def inline?
        self.__test_mode == :inline
      end

      def server_middleware
        @server_chain ||= Middleware::Chain.new
        yield @server_chain if block_given?
        @server_chain
      end

      def constantize(str)
        names = str.split('::')
        names.shift if names.empty? || names.first.empty?

        names.inject(Object) do |constant, name|
          constant.const_defined?(name) ? constant.const_get(name) : constant.const_missing(name)
        end
      end
    end
  end

  # Default to fake testing to keep old behavior
  Sidekiq::Testing.fake!

  class EmptyQueueError < RuntimeError; end

  module TestingClient
    def raw_push(payloads)
      if Sidekiq::Testing.fake?
        payloads.each do |job|
          job = Sidekiq.load_json(Sidekiq.dump_json(job))
          job.merge!('enqueued_at' => Time.now.to_f) unless job['at']
          Queues.push(job['queue'], job['class'], job)
        end
        true
      elsif Sidekiq::Testing.inline?
        payloads.each do |job|
          klass = Sidekiq::Testing.constantize(job['class'])
          job['id'] ||= SecureRandom.hex(12)
          job_hash = Sidekiq.load_json(Sidekiq.dump_json(job))
          klass.process_job(job_hash)
        end
        true
      else
        super
      end
    end
  end

  Sidekiq::Client.prepend TestingClient

  module Queues
    ##
    # The Queues class is only for testing the fake queue implementation.
    # There are 2 data structures involved in tandem. This is due to the
    # RSpec syntax of change(QueueWorker.jobs, :size), which keeps a reference
    # to the array. Because the array was derived from a filter of the total
    # jobs enqueued, it appeared as though the array didn't change.
    #
    # To solve this, we'll keep 2 hashes containing the jobs. One with keys based
    # on the queue, and another with keys of the worker names, so the array for
    # QueueWorker.jobs is a straight reference to a real array.
    #
    # Queue-based hash:
    #
    #   {
    #     "default"=>[
    #       {
    #         "class"=>"TestTesting::QueueWorker",
    #         "args"=>[1, 2],
    #         "retry"=>true,
    #         "queue"=>"default",
    #         "jid"=>"abc5b065c5c4b27fc1102833",
    #         "created_at"=>1447445554.419934
    #       }
    #     ]
    #   }
    #
    # Worker-based hash:
    #
    #   {
    #     "TestTesting::QueueWorker"=>[
    #       {
    #         "class"=>"TestTesting::QueueWorker",
    #         "args"=>[1, 2],
    #         "retry"=>true,
    #         "queue"=>"default",
    #         "jid"=>"abc5b065c5c4b27fc1102833",
    #         "created_at"=>1447445554.419934
    #       }
    #     ]
    #   }
    #
    # Example:
    #
    #   require 'sidekiq/testing'
    #
    #   assert_equal 0, Sidekiq::Queues["default"].size
    #   HardWorker.perform_async(:something)
    #   assert_equal 1, Sidekiq::Queues["default"].size
    #   assert_equal :something, Sidekiq::Queues["default"].first['args'][0]
    #
    # You can also clear all workers' jobs:
    #
    #   assert_equal 0, Sidekiq::Queues["default"].size
    #   HardWorker.perform_async(:something)
    #   Sidekiq::Queues.clear_all
    #   assert_equal 0, Sidekiq::Queues["default"].size
    #
    # This can be useful to make sure jobs don't linger between tests:
    #
    #   RSpec.configure do |config|
    #     config.before(:each) do
    #       Sidekiq::Queues.clear_all
    #     end
    #   end
    #
    class << self
      def [](queue)
        jobs_by_queue[queue]
      end

      def push(queue, klass, job)
        jobs_by_queue[queue] << job
        jobs_by_worker[klass] << job
      end

      def jobs_by_queue
        @jobs_by_queue ||= Hash.new { |hash, key| hash[key] = [] }
      end

      def jobs_by_worker
        @jobs_by_worker ||= Hash.new { |hash, key| hash[key] = [] }
      end

      def delete_for(jid, queue, klass)
        jobs_by_queue[queue.to_s].delete_if { |job| job["jid"] == jid }
        jobs_by_worker[klass].delete_if { |job| job["jid"] == jid }
      end

      def clear_for(queue, klass)
        jobs_by_queue[queue].clear
        jobs_by_worker[klass].clear
      end

      def clear_all
        jobs_by_queue.clear
        jobs_by_worker.clear
      end
    end
  end

  module Worker
    ##
    # The Sidekiq testing infrastructure overrides perform_async
    # so that it does not actually touch the network. Instead it
    # stores the asynchronous jobs in a per-class array so that
    # their presence/absence can be asserted by your tests.
    #
    # This is similar to ActionMailer's :test delivery_method and its
    # ActionMailer::Base.deliveries array.
    #
    # Example:
    #
    #   require 'sidekiq/testing'
    #
    #   assert_equal 0, HardWorker.jobs.size
    #   HardWorker.perform_async(:something)
    #   assert_equal 1, HardWorker.jobs.size
    #   assert_equal :something, HardWorker.jobs[0]['args'][0]
    #
    #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
    #   MyMailer.delay.send_welcome_email('foo@example.com')
    #   assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
    #
    # You can also clear and drain all workers' jobs:
    #
    #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
    #   assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size
    #
    #   MyMailer.delay.send_welcome_email('foo@example.com')
    #   MyModel.delay.do_something_hard
    #
    #   assert_equal 1, Sidekiq::Extensions::DelayedMailer.jobs.size
    #   assert_equal 1, Sidekiq::Extensions::DelayedModel.jobs.size
    #
    #   Sidekiq::Worker.clear_all # or .drain_all
    #
    #   assert_equal 0, Sidekiq::Extensions::DelayedMailer.jobs.size
    #   assert_equal 0, Sidekiq::Extensions::DelayedModel.jobs.size
    #
    # This can be useful to make sure jobs don't linger between tests:
    #
    #   RSpec.configure do |config|
    #     config.before(:each) do
    #       Sidekiq::Worker.clear_all
    #     end
    #   end
    #
    # or for acceptance testing, i.e. with cucumber:
    #
    #   AfterStep do
    #     Sidekiq::Worker.drain_all
    #   end
    #
    #   When I sign up as "foo@example.com"
    #   Then I should receive a welcome email to "foo@example.com"
    #
    module ClassMethods

      # Queue for this worker
      def queue
        self.sidekiq_options["queue"]
      end

      # Jobs queued for this worker
      def jobs
        Queues.jobs_by_worker[self.to_s]
      end

      # Clear all jobs for this worker
      def clear
        Queues.clear_for(queue, self.to_s)
      end

      # Drain and run all jobs for this worker
      def drain
        while jobs.any?
          next_job = jobs.first
          Queues.delete_for(next_job["jid"], next_job["queue"], self.to_s)
          process_job(next_job)
        end
      end

      # Pop out a single job and perform it
      def perform_one
        raise(EmptyQueueError, "perform_one called with empty job queue") if jobs.empty?
        next_job = jobs.first
        Queues.delete_for(next_job["jid"], queue, self.to_s)
        process_job(next_job)
      end

      def process_job(job)
        worker = new
        worker.jid = job['jid']
        worker.bid = job['bid'] if worker.respond_to?(:bid=)
        Sidekiq::Testing.server_middleware.invoke(worker, job, job['queue']) do
          execute_job(worker, job['args'])
        end
      end

      def execute_job(worker, args)
        worker.perform(*args)
      end
    end

    class << self
      def jobs # :nodoc:
        Queues.jobs_by_queue.values.flatten
      end

      # Clear all queued jobs across all workers
      def clear_all
        Queues.clear_all
      end

      # Drain all queued jobs across all workers
      def drain_all
        while jobs.any?
          worker_classes = jobs.map { |job| job["class"] }.uniq

          worker_classes.each do |worker_class|
            Sidekiq::Testing.constantize(worker_class).drain
          end
        end
      end
    end
  end
end

if defined?(::Rails) && Rails.respond_to?(:env) && !Rails.env.test?
  puts("**************************************************")
  puts("⛔️ WARNING: Sidekiq testing API enabled, but this is not the test environment. Your jobs will not go to Redis.")
  puts("**************************************************")
end
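
As a complement to the doc comments above, here is a minimal, self-contained Ruby sketch (not taken from the gem's test suite) showing the fake and inline modes side by side. HardWorker is a hypothetical worker class defined only for this example.

# Illustrative sketch only: exercises the testing API shown in the file above.
require 'sidekiq/testing'

class HardWorker
  include Sidekiq::Worker
  def perform(name)
    puts "working on #{name}"
  end
end

# Fake mode (the default after requiring sidekiq/testing): jobs accumulate
# in an in-memory queue instead of going to Redis.
Sidekiq::Testing.fake!
HardWorker.perform_async('report')
puts HardWorker.jobs.size   # => 1
HardWorker.drain            # runs the queued job, emptying the queue

# Inline mode can be scoped to a block; the job executes immediately,
# still passing through the test server middleware chain.
Sidekiq::Testing.inline! do
  HardWorker.perform_async('urgent')   # prints "working on urgent" right away
end

The block form of inline! (and fake!/disable!) restores the previous mode when the block exits, which is why it is handy for exercising a single job end to end inside an otherwise fake-mode test suite.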