sidekiq 3.4.1 → 4.0.0
- checksums.yaml +4 -4
- data/4.0-Upgrade.md +50 -0
- data/COMM-LICENSE +55 -45
- data/Changes.md +73 -1
- data/Ent-Changes.md +66 -0
- data/Gemfile +7 -1
- data/Pro-2.0-Upgrade.md +2 -2
- data/Pro-3.0-Upgrade.md +46 -0
- data/Pro-Changes.md +65 -2
- data/README.md +8 -9
- data/bin/sidekiq +5 -0
- data/bin/sidekiqctl +8 -2
- data/bin/sidekiqload +167 -0
- data/lib/sidekiq/api.rb +29 -31
- data/lib/sidekiq/cli.rb +41 -42
- data/lib/sidekiq/client.rb +5 -10
- data/lib/sidekiq/fetch.rb +35 -111
- data/lib/sidekiq/launcher.rb +102 -42
- data/lib/sidekiq/manager.rb +78 -180
- data/lib/sidekiq/middleware/server/logging.rb +10 -5
- data/lib/sidekiq/middleware/server/retry_jobs.rb +5 -5
- data/lib/sidekiq/processor.rb +126 -97
- data/lib/sidekiq/redis_connection.rb +23 -5
- data/lib/sidekiq/scheduled.rb +47 -26
- data/lib/sidekiq/testing.rb +96 -17
- data/lib/sidekiq/util.rb +20 -0
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web.rb +17 -1
- data/lib/sidekiq/web_helpers.rb +26 -4
- data/lib/sidekiq/worker.rb +14 -0
- data/lib/sidekiq.rb +37 -14
- data/sidekiq.gemspec +11 -11
- data/test/helper.rb +45 -10
- data/test/test_actors.rb +137 -0
- data/test/test_api.rb +388 -388
- data/test/test_cli.rb +29 -59
- data/test/test_client.rb +60 -135
- data/test/test_extensions.rb +29 -23
- data/test/test_fetch.rb +2 -57
- data/test/test_launcher.rb +80 -0
- data/test/test_logging.rb +1 -1
- data/test/test_manager.rb +16 -131
- data/test/test_middleware.rb +3 -5
- data/test/test_processor.rb +110 -76
- data/test/test_rails.rb +21 -0
- data/test/test_redis_connection.rb +0 -1
- data/test/test_retry.rb +114 -162
- data/test/test_scheduled.rb +11 -17
- data/test/test_scheduling.rb +20 -42
- data/test/test_sidekiq.rb +46 -16
- data/test/test_testing.rb +80 -20
- data/test/test_testing_fake.rb +68 -8
- data/test/test_testing_inline.rb +3 -3
- data/test/test_util.rb +16 -0
- data/test/test_web.rb +17 -3
- data/test/test_web_helpers.rb +3 -2
- data/web/assets/images/favicon.ico +0 -0
- data/web/assets/javascripts/application.js +6 -1
- data/web/assets/javascripts/dashboard.js +2 -8
- data/web/assets/javascripts/locales/jquery.timeago.pt-br.js +14 -14
- data/web/assets/stylesheets/application.css +33 -56
- data/web/locales/de.yml +1 -1
- data/web/locales/en.yml +1 -0
- data/web/locales/{no.yml → nb.yml} +10 -2
- data/web/locales/uk.yml +76 -0
- data/web/views/_footer.erb +2 -7
- data/web/views/_job_info.erb +1 -1
- data/web/views/_nav.erb +2 -2
- data/web/views/_poll_js.erb +5 -0
- data/web/views/{_poll.erb → _poll_link.erb} +0 -3
- data/web/views/busy.erb +2 -1
- data/web/views/dead.erb +1 -0
- data/web/views/layout.erb +2 -0
- data/web/views/morgue.erb +3 -0
- data/web/views/queue.erb +1 -0
- data/web/views/queues.erb +1 -0
- data/web/views/retries.erb +3 -0
- data/web/views/retry.erb +1 -0
- data/web/views/scheduled.erb +1 -0
- data/web/views/scheduled_job_info.erb +1 -0
- metadata +81 -47
- data/lib/sidekiq/actor.rb +0 -39
- data/test/test_worker_generator.rb +0 -17
data/lib/sidekiq/middleware/server/logging.rb
CHANGED
@@ -4,11 +4,7 @@ module Sidekiq
       class Logging
 
         def call(worker, item, queue)
-          # If we're using a wrapper class, like ActiveJob, use the "wrapped"
-          # attribute to expose the underlying thing.
-          klass = item['wrapped'] || worker.class.to_s
-
-          Sidekiq::Logging.with_context("#{klass} JID-#{item['jid']}#{" BID-#{item['bid']}" if item['bid']}") do
+          Sidekiq::Logging.with_context(log_context(worker, item)) do
            begin
               start = Time.now
               logger.info { "start" }
@@ -21,6 +17,15 @@ module Sidekiq
           end
         end
 
+        private
+
+        # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+        # attribute to expose the underlying thing.
+        def log_context(worker, item)
+          klass = item['wrapped'.freeze] || worker.class.to_s
+          "#{klass} JID-#{item['jid'.freeze]}#{" BID-#{item['bid'.freeze]}" if item['bid'.freeze]}"
+        end
+
         def elapsed(start)
           (Time.now - start).round(3)
         end
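Given a job hash like the ones this middleware receives, the extracted `log_context` builds the same tag the old inline code did: the `'wrapped'` class name for ActiveJob-wrapped jobs (the worker class otherwise), plus the JID and, when present, the BID. A quick illustration with made-up values:

    item = { 'wrapped' => 'MyJob', 'jid' => 'abc123' }
    # log_context(worker, item) => "MyJob JID-abc123"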
data/lib/sidekiq/middleware/server/retry_jobs.rb
CHANGED
@@ -121,7 +121,7 @@ module Sidekiq
           end
 
           if count < max_retry_attempts
-            delay = delay_for(worker, count)
+            delay = delay_for(worker, count, exception)
             logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
             retry_at = Time.now.to_f + delay
             payload = Sidekiq.dump_json(msg)
@@ -170,8 +170,8 @@ module Sidekiq
           end
         end
 
-        def delay_for(worker, count)
-          worker.sidekiq_retry_in_block? && retry_in(worker, count) || seconds_to_delay(count)
+        def delay_for(worker, count, exception)
+          worker.sidekiq_retry_in_block? && retry_in(worker, count, exception) || seconds_to_delay(count)
         end
 
         # delayed_job uses the same basic formula
@@ -179,9 +179,9 @@ module Sidekiq
           (count ** 4) + 15 + (rand(30)*(count+1))
         end
 
-        def retry_in(worker, count)
+        def retry_in(worker, count, exception)
           begin
-            worker.sidekiq_retry_in_block.call(count)
+            worker.sidekiq_retry_in_block.call(count, exception).to_i
           rescue Exception => e
             handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
             nil
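The change above threads the raised exception into the worker's `sidekiq_retry_in` block (and coerces the result with `to_i`), so retry delays can now vary by error type. A minimal sketch of a worker using the new two-argument block; the worker and error class are hypothetical:

    class HardWorker
      include Sidekiq::Worker

      # count is the retry number, exception is the error that triggered the retry
      sidekiq_retry_in do |count, exception|
        case exception
        when TemporaryOutageError then 300   # back off hard for a known outage
        else 10 * (count + 1)                # otherwise retry quickly
        end
      end

      def perform(*args)
        # ...
      end
    end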
data/lib/sidekiq/processor.rb
CHANGED
@@ -1,155 +1,184 @@
 require 'sidekiq/util'
-require 'sidekiq/
-
-require '
-require '
+require 'sidekiq/fetch'
+require 'thread'
+require 'concurrent/map'
+require 'concurrent/atomic/atomic_fixnum'
 
 module Sidekiq
   ##
-  # The Processor
-  #
-  #
+  # The Processor is a standalone thread which:
+  #
+  # 1. fetches a job from Redis
+  # 2. executes the job
+  #   a. instantiate the Worker
+  #   b. run the middleware chain
+  #   c. call #perform
+  #
+  # A Processor can exit due to shutdown (processor_stopped)
+  # or due to an error during job execution (processor_died)
+  #
+  # If an error occurs in the job execution, the
+  # Processor calls the Manager to create a new one
+  # to replace itself and exits.
+  #
   class Processor
-    # To prevent a memory leak, ensure that stats expire. However, they should take up a minimal amount of storage
-    # so keep them around for a long time
-    STATS_TIMEOUT = 24 * 60 * 60 * 365 * 5
 
     include Util
-
-
-
-
-
-
-
-
-
+
+    attr_reader :thread
+    attr_reader :job
+
+    def initialize(mgr)
+      @mgr = mgr
+      @down = false
+      @done = false
+      @job = nil
+      @thread = nil
+      @strategy = (mgr.options[:fetch] || Sidekiq::BasicFetch).new(mgr.options)
+    end
+
+    def terminate(wait=false)
+      @done = true
+      return if !@thread
+      @thread.value if wait
+    end
+
+    def kill(wait=false)
+      @done = true
+      return if !@thread
+      # unlike the other actors, terminate does not wait
+      # for the thread to finish because we don't know how
+      # long the job will take to finish. Instead we
+      # provide a `kill` method to call after the shutdown
+      # timeout passes.
+      @thread.raise ::Sidekiq::Shutdown
+      @thread.value if wait
+    end
+
+    def start
+      @thread ||= safe_thread("processor", &method(:run))
+    end
+
+    private unless $TESTING
+
+    def run
+      begin
+        while !@done
+          process_one
         end
+        @mgr.processor_stopped(self)
+      rescue Sidekiq::Shutdown
+        @mgr.processor_stopped(self)
+      rescue Exception => ex
+        @mgr.processor_died(self, ex)
       end
     end
 
-
+    def process_one
+      @job = fetch
+      process(@job) if @job
+      @job = nil
+    end
+
+    def get_one
+      begin
+        work = @strategy.retrieve_work
+        (logger.info { "Redis is online, #{Time.now - @down} sec downtime" }; @down = nil) if @down
+        work
+      rescue Sidekiq::Shutdown
+      rescue => ex
+        handle_fetch_exception(ex)
+      end
+    end
 
-    def
-
+    def fetch
+      j = get_one
+      if j && @done
+        j.requeue
+        nil
+      else
+        j
+      end
+    end
+
+    def handle_fetch_exception(ex)
+      if !@down
+        @down = Time.now
+        logger.error("Error fetching job: #{ex}")
+        ex.backtrace.each do |bt|
+          logger.error(bt)
+        end
+      end
+      sleep(1)
     end
 
     def process(work)
-
+      jobstr = work.job
       queue = work.queue_name
 
-
-
-      ack = true
+      ack = false
       begin
-
-        klass =
+        job = Sidekiq.load_json(jobstr)
+        klass = job['class'.freeze].constantize
         worker = klass.new
-        worker.jid =
-
-        stats(worker,
-          Sidekiq.server_middleware.invoke(worker,
-
+        worker.jid = job['jid'.freeze]
+
+        stats(worker, job, queue) do
+          Sidekiq.server_middleware.invoke(worker, job, queue) do
+            # Only ack if we either attempted to start this job or
+            # successfully completed it. This prevents us from
+            # losing jobs if a middleware raises an exception before yielding
+            ack = true
+            execute_job(worker, cloned(job['args'.freeze]))
           end
         end
+        ack = true
       rescue Sidekiq::Shutdown
         # Had to force kill this job because it didn't finish
         # within the timeout. Don't acknowledge the work since
         # we didn't properly finish it.
         ack = false
       rescue Exception => ex
-        handle_exception(ex,
+        handle_exception(ex, job || { :job => jobstr })
         raise
       ensure
         work.acknowledge if ack
       end
-
-      @boss.async.processor_done(current_actor)
-    end
-
-    def inspect
-      "<Processor##{object_id.to_s(16)}>"
     end
 
     def execute_job(worker, cloned_args)
       worker.perform(*cloned_args)
     end
 
-    private
-
     def thread_identity
       @str ||= Thread.current.object_id.to_s(36)
     end
 
-
-
-
-
-
-
-
-      conn.hmset("#{identity}:workers", thread_identity, hash)
-      conn.expire("#{identity}:workers", 60*60*4)
-    end
-  end
-end
+    WORKER_STATE = Concurrent::Map.new
+    PROCESSED = Concurrent::AtomicFixnum.new
+    FAILURE = Concurrent::AtomicFixnum.new
+
+    def stats(worker, job, queue)
+      tid = thread_identity
+      WORKER_STATE[tid] = {:queue => queue, :payload => job, :run_at => Time.now.to_i }
 
       begin
         yield
       rescue Exception
-
-        failed = "stat:failed:#{Time.now.utc.to_date}"
-        Sidekiq.redis do |conn|
-          conn.multi do
-            conn.incrby("stat:failed", 1)
-            conn.incrby(failed, 1)
-            conn.expire(failed, STATS_TIMEOUT)
-          end
-        end
+        FAILURE.increment
         raise
       ensure
-
-
-        Sidekiq.redis do |conn|
-          conn.multi do
-            conn.hdel("#{identity}:workers", thread_identity)
-            conn.incrby("stat:processed", 1)
-            conn.incrby(processed, 1)
-            conn.expire(processed, STATS_TIMEOUT)
-          end
-        end
+        WORKER_STATE.delete(tid)
+        PROCESSED.increment
       end
     end
 
     # Deep clone the arguments passed to the worker so that if
-    # the
+    # the job fails, what is pushed back onto Redis hasn't
     # been mutated by the worker.
     def cloned(ary)
       Marshal.load(Marshal.dump(ary))
     end
 
-    # If an exception occurs in the block passed to this method, that block will be retried up to max_retries times.
-    # All exceptions will be swallowed and logged.
-    def retry_and_suppress_exceptions(max_retries = 5)
-      retry_count = 0
-      begin
-        yield
-      rescue => e
-        retry_count += 1
-        if retry_count <= max_retries
-          Sidekiq.logger.debug {"Suppressing and retrying error: #{e.inspect}"}
-          pause_for_recovery(retry_count)
-          retry
-        else
-          handle_exception(e, { :message => "Exhausted #{max_retries} retries"})
-        end
-      end
-    end
-
-    def pause_for_recovery(retry_count)
-      sleep(retry_count)
-    end
   end
 end
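The `cloned` helper above deep-copies job arguments by round-tripping them through Marshal, so a retry pushes the original args back to Redis even if the worker mutated them in place. A quick illustration (values made up):

    args = [{ 'ids' => [1, 2, 3] }]
    copy = Marshal.load(Marshal.dump(args))
    copy.first['ids'] << 4
    args.first['ids']  # => [1, 2, 3] -- the original is untouched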
data/lib/sidekiq/redis_connection.rb
CHANGED
@@ -9,10 +9,11 @@ module Sidekiq
     def create(options={})
       options[:url] ||= determine_redis_provider
 
-
-
-
+      size = options[:size] || (Sidekiq.server? ? (Sidekiq.options[:concurrency] + 5) : 5)
+
+      verify_sizing(size, Sidekiq.options[:concurrency]) if Sidekiq.server?
 
+      pool_timeout = options[:pool_timeout] || 1
       log_info(options)
 
       ConnectionPool.new(:timeout => pool_timeout, :size => size) do
@@ -22,13 +23,30 @@ module Sidekiq
 
     private
 
+    # Sidekiq needs a lot of concurrent Redis connections.
+    #
+    # We need a connection for each Processor.
+    # We need a connection for Pro's real-time change listener
+    # We need a connection to various features to call Redis every few seconds:
+    #   - the process heartbeat.
+    #   - enterprise's leader election
+    #   - enterprise's cron support
+    def verify_sizing(size, concurrency)
+      raise ArgumentError, "Your Redis connection pool is too small for Sidekiq to work, your pool has #{size} connections but really needs to have at least #{concurrency + 2}" if size <= concurrency
+    end
+
     def build_client(options)
       namespace = options[:namespace]
 
       client = Redis.new client_opts(options)
       if namespace
-
-
+        begin
+          require 'redis/namespace'
+          Redis::Namespace.new(namespace, :redis => client)
+        rescue LoadError
+          Sidekiq.logger.error("redis-namespace gem not included in Gemfile, cannot use namespace '#{namespace}'")
+          exit(-127)
+        end
       else
         client
       end
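Given `verify_sizing` above, a server process now raises at boot whenever the pool is not strictly larger than the configured concurrency. A hedged sketch of sizing the pool explicitly (the URL and numbers are illustrative): with concurrency 25, a pool of 30 leaves headroom for the heartbeat and other periodic connections the comment lists.

    Sidekiq.configure_server do |config|
      # pool size must exceed :concurrency (25 here), so give it headroom
      config.redis = { url: 'redis://localhost:6379/0', size: 30 }
    end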
data/lib/sidekiq/scheduled.rb
CHANGED
@@ -1,6 +1,5 @@
 require 'sidekiq'
 require 'sidekiq/util'
-require 'sidekiq/actor'
 require 'sidekiq/api'
 
 module Sidekiq
@@ -17,7 +16,7 @@ module Sidekiq
       # We need to go through the list one at a time to reduce the risk of something
       # going wrong between the time jobs are popped from the scheduled queue and when
       # they are pushed onto a work queue and losing the jobs.
-      while job = conn.zrangebyscore(sorted_set, '-inf', now, :limit => [0, 1]).first do
+      while job = conn.zrangebyscore(sorted_set, '-inf'.freeze, now, :limit => [0, 1]).first do
 
         # Pop item off the queue and add it to the work queue. If the job can't be popped from
         # the queue, it's because another process already popped it so we can move on to the
@@ -39,33 +38,56 @@ module Sidekiq
     # workers can pick it up like any other job.
     class Poller
       include Util
-      include Actor
 
       INITIAL_WAIT = 10
 
       def initialize
         @enq = (Sidekiq.options[:scheduled_enq] || Sidekiq::Scheduled::Enq).new
+        @sleeper = ConnectionPool::TimedStack.new
+        @done = false
       end
 
-
-
-
-
-
-
-
-
-
-
-
+      # Shut down this instance, will pause until the thread is dead.
+      def terminate
+        @done = true
+        if @thread
+          t = @thread
+          @thread = nil
+          @sleeper << 0
+          t.value
+        end
+      end
+
+      def start
+        @thread ||= safe_thread("scheduler") do
+          initial_wait
+
+          while !@done
+            enqueue
+            wait
           end
+          Sidekiq.logger.info("Scheduler exiting...")
+        end
+      end
 
-
+      def enqueue
+        begin
+          @enq.enqueue_jobs
+        rescue => ex
+          # Most likely a problem with redis networking.
+          # Punt and try again at the next interval
+          logger.error ex.message
+          logger.error ex.backtrace.first
         end
       end
 
       private
 
+      def wait
+        @sleeper.pop(random_poll_interval)
+      rescue Timeout::Error
+      end
+
       # Calculates a random interval that is ±50% the desired average.
       def random_poll_interval
         poll_interval_average * rand + poll_interval_average.to_f / 2
@@ -83,7 +105,7 @@ module Sidekiq
       # all your Sidekiq processes at the same time will lead to them all polling at
       # the same time: the thundering herd problem.
       #
-      # We only do this if
+      # We only do this if poll_interval_average is unset (the default).
       def poll_interval_average
         Sidekiq.options[:poll_interval_average] ||= scaled_poll_interval
       end
@@ -98,16 +120,15 @@ module Sidekiq
       end
 
       def initial_wait
-
-
-
-
-
-
-
-
-
-        end
+        # Have all processes sleep between 5-15 seconds. 10 seconds
+        # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number
+        # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
+        total = 0
+        total += INITIAL_WAIT unless Sidekiq.options[:poll_interval_average]
+        total += (5 * rand)
+
+        @sleeper.pop(total)
+      rescue Timeout::Error
       end
 
     end
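The jitter formula in `random_poll_interval` above keeps each poll within ±50% of the configured average: with a `poll_interval_average` of 15 seconds, `15 * rand` falls in [0, 15) and adding `15 / 2.0 = 7.5` yields intervals in [7.5, 22.5), averaging 15. A one-liner to convince yourself:

    avg = 15
    interval = avg * rand + avg.to_f / 2   # uniform in [7.5, 22.5)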
data/lib/sidekiq/testing.rb
CHANGED
@@ -48,6 +48,12 @@ module Sidekiq
       def inline?
         self.__test_mode == :inline
       end
+
+      def server_middleware
+        @server_chain ||= Middleware::Chain.new
+        yield @server_chain if block_given?
+        @server_chain
+      end
     end
   end
 
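The new `Sidekiq::Testing.server_middleware` hook gives tests a middleware chain that fake and inline jobs are run through (see `process_job` later in this diff). A hedged usage sketch; `MyMiddleware` is hypothetical:

    require 'sidekiq/testing'

    Sidekiq::Testing.server_middleware do |chain|
      chain.add MyMiddleware
    end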
@@ -62,15 +68,15 @@ module Sidekiq
       def raw_push(payloads)
         if Sidekiq::Testing.fake?
           payloads.each do |job|
-            job['
+            Queues.jobs[job['queue']] << Sidekiq.load_json(Sidekiq.dump_json(job))
           end
           true
         elsif Sidekiq::Testing.inline?
           payloads.each do |job|
-            job['jid'] ||= SecureRandom.hex(12)
             klass = job['class'].constantize
-
-
+            job['id'] ||= SecureRandom.hex(12)
+            job_hash = Sidekiq.load_json(Sidekiq.dump_json(job))
+            klass.process_job(job_hash)
           end
           true
         else
@@ -79,6 +85,64 @@ module Sidekiq
       end
     end
 
+    module Queues
+      ##
+      # The Queues class is only for testing the fake queue implementation.
+      # The data is structured as a hash with queue name as hash key and array
+      # of job data as the value.
+      #
+      #   {
+      #     "default"=>[
+      #       {
+      #         "class"=>"TestTesting::QueueWorker",
+      #         "args"=>[1, 2],
+      #         "retry"=>true,
+      #         "queue"=>"default",
+      #         "jid"=>"abc5b065c5c4b27fc1102833",
+      #         "created_at"=>1447445554.419934
+      #       }
+      #     ]
+      #   }
+      #
+      # Example:
+      #
+      #   require 'sidekiq/testing'
+      #
+      #   assert_equal 0, Sidekiq::Queues["default"].size
+      #   HardWorker.perform_async(:something)
+      #   assert_equal 1, Sidekiq::Queues["default"].size
+      #   assert_equal :something, Sidekiq::Queues["default"].first['args'][0]
+      #
+      # You can also clear all workers' jobs:
+      #
+      #   assert_equal 0, Sidekiq::Queues["default"].size
+      #   HardWorker.perform_async(:something)
+      #   Sidekiq::Queues.clear_all
+      #   assert_equal 0, Sidekiq::Queues["default"].size
+      #
+      # This can be useful to make sure jobs don't linger between tests:
+      #
+      #   RSpec.configure do |config|
+      #     config.before(:each) do
+      #       Sidekiq::Queues.clear_all
+      #     end
+      #   end
+      #
+      class << self
+        def [](queue)
+          jobs[queue.to_s]
+        end
+
+        def jobs
+          @jobs ||= Hash.new { |hash, key| hash[key] = [] }
+        end
+
+        def clear_all
+          jobs.clear
+        end
+      end
+    end
+
     module Worker
       ##
       # The Sidekiq testing infrastructure overrides perform_async
@@ -137,34 +201,45 @@ module Sidekiq
       #
       module ClassMethods
 
+        # Queue for this worker
+        def queue
+          self.sidekiq_options["queue"].to_s
+        end
+
         # Jobs queued for this worker
         def jobs
-
+          Queues.jobs[queue].select { |job| job["class"] == self.to_s }
         end
 
         # Clear all jobs for this worker
         def clear
-          jobs.clear
+          Queues.jobs[queue].clear
         end
 
         # Drain and run all jobs for this worker
         def drain
-          while
-
-
-
-            execute_job(worker, job['args'])
+          while jobs.any?
+            next_job = jobs.first
+            Queues.jobs[queue].delete_if { |job| job["jid"] == next_job["jid"] }
+            process_job(next_job)
           end
         end
 
         # Pop out a single job and perform it
         def perform_one
           raise(EmptyQueueError, "perform_one called with empty job queue") if jobs.empty?
-
+          next_job = jobs.first
+          Queues.jobs[queue].delete_if { |job| job["jid"] == next_job["jid"] }
+          process_job(next_job)
+        end
+
+        def process_job(job)
           worker = new
           worker.jid = job['jid']
           worker.bid = job['bid'] if worker.respond_to?(:bid=)
-
+          Sidekiq::Testing.server_middleware.invoke(worker, job, job['queue']) do
+            execute_job(worker, job['args'])
+          end
         end
 
         def execute_job(worker, args)
@@ -174,18 +249,22 @@ module Sidekiq
 
       class << self
         def jobs # :nodoc:
-
+          Queues.jobs.values.flatten
         end
 
         # Clear all queued jobs across all workers
         def clear_all
-
+          Queues.clear_all
         end
 
         # Drain all queued jobs across all workers
         def drain_all
-
-          jobs.
+          while jobs.any?
+            worker_classes = jobs.map { |job| job["class"] }.uniq
+
+            worker_classes.each do |worker_class|
+              worker_class.constantize.drain
+            end
           end
         end
       end
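Putting the fake-queue API above together, a hedged test sketch (`HardWorker` is hypothetical; assertions are Minitest-style):

    require 'sidekiq/testing'
    Sidekiq::Testing.fake!

    HardWorker.perform_async(1)
    HardWorker.perform_async(2)
    assert_equal 2, HardWorker.jobs.size

    HardWorker.drain   # pops each job and runs it through process_job
    assert_equal 0, HardWorker.jobs.size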