sidekiq 3.5.4 → 5.2.7
Potentially problematic release: this version of sidekiq might be problematic.
- checksums.yaml +5 -5
- data/.circleci/config.yml +61 -0
- data/{Contributing.md → .github/contributing.md} +0 -0
- data/.github/issue_template.md +11 -0
- data/.gitignore +3 -0
- data/.travis.yml +5 -10
- data/4.0-Upgrade.md +53 -0
- data/5.0-Upgrade.md +56 -0
- data/COMM-LICENSE +13 -11
- data/Changes.md +376 -1
- data/Ent-Changes.md +201 -2
- data/Gemfile +14 -18
- data/LICENSE +1 -1
- data/Pro-3.0-Upgrade.md +44 -0
- data/Pro-4.0-Upgrade.md +35 -0
- data/Pro-Changes.md +307 -2
- data/README.md +34 -22
- data/Rakefile +3 -3
- data/bin/sidekiq +0 -1
- data/bin/sidekiqctl +13 -86
- data/bin/sidekiqload +23 -27
- data/code_of_conduct.md +50 -0
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +3 -3
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +6 -6
- data/lib/sidekiq.rb +72 -25
- data/lib/sidekiq/api.rb +206 -73
- data/lib/sidekiq/cli.rb +145 -101
- data/lib/sidekiq/client.rb +42 -36
- data/lib/sidekiq/core_ext.rb +1 -105
- data/lib/sidekiq/ctl.rb +221 -0
- data/lib/sidekiq/delay.rb +42 -0
- data/lib/sidekiq/exception_handler.rb +4 -5
- data/lib/sidekiq/extensions/action_mailer.rb +1 -0
- data/lib/sidekiq/extensions/active_record.rb +1 -0
- data/lib/sidekiq/extensions/class_methods.rb +1 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +8 -1
- data/lib/sidekiq/fetch.rb +36 -111
- data/lib/sidekiq/job_logger.rb +25 -0
- data/lib/sidekiq/job_retry.rb +262 -0
- data/lib/sidekiq/launcher.rb +129 -55
- data/lib/sidekiq/logging.rb +21 -3
- data/lib/sidekiq/manager.rb +83 -182
- data/lib/sidekiq/middleware/chain.rb +1 -0
- data/lib/sidekiq/middleware/i18n.rb +1 -0
- data/lib/sidekiq/middleware/server/active_record.rb +10 -0
- data/lib/sidekiq/paginator.rb +1 -0
- data/lib/sidekiq/processor.rb +221 -103
- data/lib/sidekiq/rails.rb +47 -27
- data/lib/sidekiq/redis_connection.rb +74 -7
- data/lib/sidekiq/scheduled.rb +87 -28
- data/lib/sidekiq/testing.rb +150 -19
- data/lib/sidekiq/testing/inline.rb +1 -0
- data/lib/sidekiq/util.rb +15 -17
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web.rb +120 -184
- data/lib/sidekiq/web/action.rb +89 -0
- data/lib/sidekiq/web/application.rb +353 -0
- data/lib/sidekiq/{web_helpers.rb → web/helpers.rb} +123 -47
- data/lib/sidekiq/web/router.rb +100 -0
- data/lib/sidekiq/worker.rb +135 -18
- data/sidekiq.gemspec +8 -14
- data/web/assets/images/{status-sd8051fd480.png → status.png} +0 -0
- data/web/assets/javascripts/application.js +24 -20
- data/web/assets/javascripts/dashboard.js +33 -18
- data/web/assets/stylesheets/application-rtl.css +246 -0
- data/web/assets/stylesheets/application.css +401 -7
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +4 -8
- data/web/locales/ar.yml +81 -0
- data/web/locales/cs.yml +11 -1
- data/web/locales/de.yml +1 -1
- data/web/locales/en.yml +4 -0
- data/web/locales/es.yml +4 -3
- data/web/locales/fa.yml +80 -0
- data/web/locales/fr.yml +21 -12
- data/web/locales/he.yml +79 -0
- data/web/locales/ja.yml +24 -13
- data/web/locales/ru.yml +3 -0
- data/web/locales/ur.yml +80 -0
- data/web/views/_footer.erb +7 -9
- data/web/views/_job_info.erb +5 -1
- data/web/views/_nav.erb +5 -19
- data/web/views/_paging.erb +1 -1
- data/web/views/busy.erb +18 -9
- data/web/views/dashboard.erb +5 -5
- data/web/views/dead.erb +1 -1
- data/web/views/layout.erb +13 -5
- data/web/views/morgue.erb +16 -12
- data/web/views/queue.erb +12 -11
- data/web/views/queues.erb +5 -3
- data/web/views/retries.erb +19 -13
- data/web/views/retry.erb +2 -2
- data/web/views/scheduled.erb +4 -4
- data/web/views/scheduled_job_info.erb +1 -1
- metadata +45 -227
- data/lib/sidekiq/actor.rb +0 -39
- data/lib/sidekiq/middleware/server/logging.rb +0 -40
- data/lib/sidekiq/middleware/server/retry_jobs.rb +0 -206
- data/test/config.yml +0 -9
- data/test/env_based_config.yml +0 -11
- data/test/fake_env.rb +0 -0
- data/test/fixtures/en.yml +0 -2
- data/test/helper.rb +0 -49
- data/test/test_api.rb +0 -493
- data/test/test_cli.rb +0 -335
- data/test/test_client.rb +0 -194
- data/test/test_exception_handler.rb +0 -55
- data/test/test_extensions.rb +0 -126
- data/test/test_fetch.rb +0 -104
- data/test/test_logging.rb +0 -34
- data/test/test_manager.rb +0 -168
- data/test/test_middleware.rb +0 -159
- data/test/test_processor.rb +0 -237
- data/test/test_rails.rb +0 -21
- data/test/test_redis_connection.rb +0 -126
- data/test/test_retry.rb +0 -325
- data/test/test_scheduled.rb +0 -114
- data/test/test_scheduling.rb +0 -49
- data/test/test_sidekiq.rb +0 -99
- data/test/test_testing.rb +0 -142
- data/test/test_testing_fake.rb +0 -268
- data/test/test_testing_inline.rb +0 -93
- data/test/test_util.rb +0 -16
- data/test/test_web.rb +0 -608
- data/test/test_web_helpers.rb +0 -53
- data/web/assets/images/bootstrap/glyphicons-halflings-white.png +0 -0
- data/web/assets/images/bootstrap/glyphicons-halflings.png +0 -0
- data/web/assets/images/status/active.png +0 -0
- data/web/assets/images/status/idle.png +0 -0
- data/web/assets/javascripts/locales/README.md +0 -27
- data/web/assets/javascripts/locales/jquery.timeago.ar.js +0 -96
- data/web/assets/javascripts/locales/jquery.timeago.bg.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.bs.js +0 -49
- data/web/assets/javascripts/locales/jquery.timeago.ca.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.cs.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.cy.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.da.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.de.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.el.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.en-short.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.en.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.es.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.et.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.fa.js +0 -22
- data/web/assets/javascripts/locales/jquery.timeago.fi.js +0 -28
- data/web/assets/javascripts/locales/jquery.timeago.fr-short.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.fr.js +0 -17
- data/web/assets/javascripts/locales/jquery.timeago.he.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.hr.js +0 -49
- data/web/assets/javascripts/locales/jquery.timeago.hu.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.hy.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.id.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.it.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.ja.js +0 -19
- data/web/assets/javascripts/locales/jquery.timeago.ko.js +0 -17
- data/web/assets/javascripts/locales/jquery.timeago.lt.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.mk.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.nl.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.no.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.pl.js +0 -31
- data/web/assets/javascripts/locales/jquery.timeago.pt-br.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.pt.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.ro.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.rs.js +0 -49
- data/web/assets/javascripts/locales/jquery.timeago.ru.js +0 -34
- data/web/assets/javascripts/locales/jquery.timeago.sk.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.sl.js +0 -44
- data/web/assets/javascripts/locales/jquery.timeago.sv.js +0 -18
- data/web/assets/javascripts/locales/jquery.timeago.th.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.tr.js +0 -16
- data/web/assets/javascripts/locales/jquery.timeago.uk.js +0 -34
- data/web/assets/javascripts/locales/jquery.timeago.uz.js +0 -19
- data/web/assets/javascripts/locales/jquery.timeago.zh-cn.js +0 -20
- data/web/assets/javascripts/locales/jquery.timeago.zh-tw.js +0 -20
- data/web/views/_poll_js.erb +0 -5
data/lib/sidekiq/actor.rb
DELETED
@@ -1,39 +0,0 @@
-module Sidekiq
-  module Actor
-
-    module ClassMethods
-      def trap_exit(*args)
-      end
-      def new_link(*args)
-        new(*args)
-      end
-    end
-
-    module InstanceMethods
-      def current_actor
-        self
-      end
-      def after(interval)
-      end
-      def alive?
-        @dead = false unless defined?(@dead)
-        !@dead
-      end
-      def terminate
-        @dead = true
-      end
-      def defer
-        yield
-      end
-    end
-
-    def self.included(klass)
-      if $TESTING
-        klass.__send__(:include, InstanceMethods)
-        klass.__send__(:extend, ClassMethods)
-      else
-        klass.__send__(:include, Celluloid)
-      end
-    end
-  end
-end
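
The Actor module above was a stand-in for Celluloid: under $TESTING it mixes in the plain-Ruby stubs defined here, otherwise it mixes in Celluloid itself. A minimal usage sketch (hypothetical class name, assuming sidekiq 3.x where this file still ships and the $TESTING flag set in data/test/helper.rb):

  $TESTING = true
  require 'sidekiq/actor'

  class FakeManager
    include Sidekiq::Actor # pulls in InstanceMethods/ClassMethods, not Celluloid
  end

  mgr = FakeManager.new_link # stubbed: simply calls new
  mgr.alive?                 # => true
  mgr.terminate
  mgr.alive?                 # => false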
data/lib/sidekiq/middleware/server/logging.rb
DELETED
@@ -1,40 +0,0 @@
-module Sidekiq
-  module Middleware
-    module Server
-      class Logging
-
-        def call(worker, item, queue)
-          Sidekiq::Logging.with_context(log_context(worker, item)) do
-            begin
-              start = Time.now
-              logger.info { "start" }
-              yield
-              logger.info { "done: #{elapsed(start)} sec" }
-            rescue Exception
-              logger.info { "fail: #{elapsed(start)} sec" }
-              raise
-            end
-          end
-        end
-
-        private
-
-        # If we're using a wrapper class, like ActiveJob, use the "wrapped"
-        # attribute to expose the underlying thing.
-        def log_context(worker, item)
-          klass = item['wrapped'.freeze] || worker.class.to_s
-          "#{klass} JID-#{item['jid'.freeze]}#{" BID-#{item['bid'.freeze]}" if item['bid'.freeze]}"
-        end
-
-        def elapsed(start)
-          (Time.now - start).round(3)
-        end
-
-        def logger
-          Sidekiq.logger
-        end
-      end
-    end
-  end
-end
-
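
The class above was part of the default server middleware chain in Sidekiq 3.x. A minimal sketch of wiring an equivalent job-logging middleware in explicitly, using the server_middleware registration API that the retry middleware's comments below also show (only the registration is spelled out here):

  Sidekiq.configure_server do |config|
    config.server_middleware do |chain|
      # logs "start" and "done: N sec" around every job, as the class above does
      chain.add Sidekiq::Middleware::Server::Logging
    end
  end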
data/lib/sidekiq/middleware/server/retry_jobs.rb
DELETED
@@ -1,206 +0,0 @@
-require 'sidekiq/scheduled'
-require 'sidekiq/api'
-
-module Sidekiq
-  module Middleware
-    module Server
-      ##
-      # Automatically retry jobs that fail in Sidekiq.
-      # Sidekiq's retry support assumes a typical development lifecycle:
-      #
-      # 0. push some code changes with a bug in it
-      # 1. bug causes job processing to fail, sidekiq's middleware captures
-      #    the job and pushes it onto a retry queue
-      # 2. sidekiq retries jobs in the retry queue multiple times with
-      #    an exponential delay, the job continues to fail
-      # 3. after a few days, a developer deploys a fix. the job is
-      #    reprocessed successfully.
-      # 4. once retries are exhausted, sidekiq will give up and move the
-      #    job to the Dead Job Queue (aka morgue) where it must be dealt with
-      #    manually in the Web UI.
-      # 5. After 6 months on the DJQ, Sidekiq will discard the job.
-      #
-      # A job looks like:
-      #
-      #   { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
-      #
-      # The 'retry' option also accepts a number (in place of 'true'):
-      #
-      #   { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
-      #
-      # The job will be retried this number of times before giving up. (If simply
-      # 'true', Sidekiq retries 25 times)
-      #
-      # We'll add a bit more data to the job to support retries:
-      #
-      # * 'queue' - the queue to use
-      # * 'retry_count' - number of times we've retried so far.
-      # * 'error_message' - the message from the exception
-      # * 'error_class' - the exception class
-      # * 'failed_at' - the first time it failed
-      # * 'retried_at' - the last time it was retried
-      # * 'backtrace' - the number of lines of error backtrace to store
-      #
-      # We don't store the backtrace by default as that can add a lot of overhead
-      # to the job and everyone is using an error service, right?
-      #
-      # The default number of retry attempts is 25 which works out to about 3 weeks
-      # of retries. You can pass a value for the max number of retry attempts when
-      # adding the middleware using the options hash:
-      #
-      #   Sidekiq.configure_server do |config|
-      #     config.server_middleware do |chain|
-      #       chain.add Sidekiq::Middleware::Server::RetryJobs, :max_retries => 7
-      #     end
-      #   end
-      #
-      # or limit the number of retries for a particular worker with:
-      #
-      #   class MyWorker
-      #     include Sidekiq::Worker
-      #     sidekiq_options :retry => 10
-      #   end
-      #
-      class RetryJobs
-        include Sidekiq::Util
-
-        DEFAULT_MAX_RETRY_ATTEMPTS = 25
-
-        def initialize(options = {})
-          @max_retries = options.fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
-        end
-
-        def call(worker, msg, queue)
-          yield
-        rescue Sidekiq::Shutdown
-          # ignore, will be pushed back onto queue during hard_shutdown
-          raise
-        rescue Exception => e
-          # ignore, will be pushed back onto queue during hard_shutdown
-          raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
-
-          raise e unless msg['retry']
-          attempt_retry(worker, msg, queue, e)
-        end
-
-        private
-
-        def attempt_retry(worker, msg, queue, exception)
-          max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
-
-          msg['queue'] = if msg['retry_queue']
-            msg['retry_queue']
-          else
-            queue
-          end
-
-          # App code can stuff all sorts of crazy binary data into the error message
-          # that won't convert to JSON.
-          m = exception.message.to_s[0, 10_000]
-          if m.respond_to?(:scrub!)
-            m.force_encoding("utf-8")
-            m.scrub!
-          end
-
-          msg['error_message'] = m
-          msg['error_class'] = exception.class.name
-          count = if msg['retry_count']
-            msg['retried_at'] = Time.now.to_f
-            msg['retry_count'] += 1
-          else
-            msg['failed_at'] = Time.now.to_f
-            msg['retry_count'] = 0
-          end
-
-          if msg['backtrace'] == true
-            msg['error_backtrace'] = exception.backtrace
-          elsif !msg['backtrace']
-            # do nothing
-          elsif msg['backtrace'].to_i != 0
-            msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
-          end
-
-          if count < max_retry_attempts
-            delay = delay_for(worker, count, exception)
-            logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
-            retry_at = Time.now.to_f + delay
-            payload = Sidekiq.dump_json(msg)
-            Sidekiq.redis do |conn|
-              conn.zadd('retry', retry_at.to_s, payload)
-            end
-          else
-            # Goodbye dear message, you (re)tried your best I'm sure.
-            retries_exhausted(worker, msg)
-          end
-
-          raise exception
-        end
-
-        def retries_exhausted(worker, msg)
-          logger.debug { "Dropping message after hitting the retry maximum: #{msg}" }
-          begin
-            if worker.sidekiq_retries_exhausted_block?
-              worker.sidekiq_retries_exhausted_block.call(msg)
-            end
-          rescue => e
-            handle_exception(e, { context: "Error calling retries_exhausted for #{worker.class}", job: msg })
-          end
-
-          send_to_morgue(msg) unless msg['dead'] == false
-        end
-
-        def send_to_morgue(msg)
-          Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
-          payload = Sidekiq.dump_json(msg)
-          now = Time.now.to_f
-          Sidekiq.redis do |conn|
-            conn.multi do
-              conn.zadd('dead', now, payload)
-              conn.zremrangebyscore('dead', '-inf', now - DeadSet.timeout)
-              conn.zremrangebyrank('dead', 0, -DeadSet.max_jobs)
-            end
-          end
-        end
-
-        def retry_attempts_from(msg_retry, default)
-          if msg_retry.is_a?(Fixnum)
-            msg_retry
-          else
-            default
-          end
-        end
-
-        def delay_for(worker, count, exception)
-          worker.sidekiq_retry_in_block? && retry_in(worker, count, exception) || seconds_to_delay(count)
-        end
-
-        # delayed_job uses the same basic formula
-        def seconds_to_delay(count)
-          (count ** 4) + 15 + (rand(30)*(count+1))
-        end
-
-        def retry_in(worker, count, exception)
-          begin
-            worker.sidekiq_retry_in_block.call(count, exception).to_i
-          rescue Exception => e
-            handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-            nil
-          end
-        end
-
-        def exception_caused_by_shutdown?(e, checked_causes = [])
-          # In Ruby 2.1.0 only, check if exception is a result of shutdown.
-          return false unless defined?(e.cause)
-
-          # Handle circular causes
-          checked_causes << e.object_id
-          return false if checked_causes.include?(e.cause.object_id)
-
-          e.cause.instance_of?(Sidekiq::Shutdown) ||
-            exception_caused_by_shutdown?(e.cause, checked_causes)
-        end
-
-      end
-    end
-  end
-end
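
Workers tuned this behaviour through the options and hooks the middleware reads above: sidekiq_options retry:/backtrace:/dead:, sidekiq_retry_in, and sidekiq_retries_exhausted. A minimal sketch with a hypothetical worker (the default backoff, per seconds_to_delay, is (count ** 4) + 15 + rand(30) * (count + 1) seconds):

  class HardWorker
    include Sidekiq::Worker
    # retry 5 times, keep 20 backtrace lines, allow the job to go to the morgue
    sidekiq_options retry: 5, backtrace: 20, dead: true

    # consulted by delay_for/retry_in above instead of seconds_to_delay
    sidekiq_retry_in do |count, exception|
      10 * (count + 1) # 10s, 20s, 30s, ...
    end

    # called once by retries_exhausted before send_to_morgue
    sidekiq_retries_exhausted do |msg|
      Sidekiq.logger.warn "Giving up on #{msg['class']} #{msg['jid']}: #{msg['error_message']}"
    end

    def perform(arg)
      # work that may raise
    end
  end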
data/test/config.yml
DELETED
data/test/env_based_config.yml
DELETED
data/test/fake_env.rb
DELETED
File without changes
data/test/fixtures/en.yml
DELETED
data/test/helper.rb
DELETED
@@ -1,49 +0,0 @@
-$CELLULOID_DEBUG = false
-$TESTING = true
-if ENV["COVERAGE"]
-  require 'simplecov'
-  SimpleCov.start do
-    add_filter "/test/"
-    add_filter "/myapp/"
-  end
-end
-ENV['RACK_ENV'] = ENV['RAILS_ENV'] = 'test'
-
-begin
-  require 'pry-byebug'
-rescue LoadError
-end
-
-require 'minitest/autorun'
-require 'minitest/pride'
-
-require 'celluloid/current'
-require 'celluloid/test'
-Celluloid.boot
-require 'sidekiq'
-require 'sidekiq/util'
-Sidekiq.logger.level = Logger::ERROR
-
-Sidekiq::Test = Minitest::Test
-
-require 'sidekiq/redis_connection'
-REDIS_URL = ENV['REDIS_URL'] || 'redis://localhost/15'
-REDIS = Sidekiq::RedisConnection.create(:url => REDIS_URL, :namespace => 'testy')
-
-Sidekiq.configure_client do |config|
-  config.redis = { :url => REDIS_URL, :namespace => 'testy' }
-end
-
-def capture_logging(lvl=Logger::INFO)
-  old = Sidekiq.logger
-  begin
-    out = StringIO.new
-    logger = Logger.new(out)
-    logger.level = lvl
-    Sidekiq.logger = logger
-    yield
-    out.string
-  ensure
-    Sidekiq.logger = old
-  end
-end
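
capture_logging above temporarily swaps Sidekiq.logger for an in-memory logger and returns whatever was written at the given level. A minimal sketch of how a test in this suite would use it (hypothetical test case, following the suite's describe/it style):

  require_relative 'helper'

  class TestLogCapture < Sidekiq::Test
    describe 'capture_logging' do
      it 'returns what was written to the logger' do
        output = capture_logging(Logger::WARN) do
          Sidekiq.logger.warn('redis is slow')
        end
        assert_includes output, 'redis is slow'
      end
    end
  end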
data/test/test_api.rb
DELETED
@@ -1,493 +0,0 @@
-require_relative 'helper'
-
-class TestApi < Sidekiq::Test
-
-  describe "stats" do
-
-    it "is initially zero" do
-      Sidekiq.redis {|c| c.flushdb }
-      s = Sidekiq::Stats.new
-      assert_equal 0, s.processed
-      assert_equal 0, s.failed
-      assert_equal 0, s.enqueued
-    end
-
-    describe "processed" do
-      it "returns number of processed jobs" do
-        Sidekiq.redis { |conn| conn.set("stat:processed", 5) }
-        s = Sidekiq::Stats.new
-        assert_equal 5, s.processed
-      end
-    end
-
-    describe "failed" do
-      it "returns number of failed jobs" do
-        Sidekiq.redis { |conn| conn.set("stat:failed", 5) }
-        s = Sidekiq::Stats.new
-        assert_equal 5, s.failed
-      end
-    end
-
-    describe "reset" do
-      before do
-        Sidekiq.redis do |conn|
-          conn.set('stat:processed', 5)
-          conn.set('stat:failed', 10)
-        end
-      end
-
-      it 'will reset all stats by default' do
-        Sidekiq::Stats.new.reset
-        s = Sidekiq::Stats.new
-        assert_equal 0, s.failed
-        assert_equal 0, s.processed
-      end
-
-      it 'can reset individual stats' do
-        Sidekiq::Stats.new.reset('failed')
-        s = Sidekiq::Stats.new
-        assert_equal 0, s.failed
-        assert_equal 5, s.processed
-      end
-
-      it 'can accept anything that responds to #to_s' do
-        Sidekiq::Stats.new.reset(:failed)
-        s = Sidekiq::Stats.new
-        assert_equal 0, s.failed
-        assert_equal 5, s.processed
-      end
-
-      it 'ignores anything other than "failed" or "processed"' do
-        Sidekiq::Stats.new.reset((1..10).to_a, ['failed'])
-        s = Sidekiq::Stats.new
-        assert_equal 0, s.failed
-        assert_equal 5, s.processed
-      end
-    end
-
-    describe "queues" do
-      before do
-        Sidekiq.redis {|c| c.flushdb }
-      end
-
-      it "is initially empty" do
-        s = Sidekiq::Stats::Queues.new
-        assert_equal 0, s.lengths.size
-      end
-
-      it "returns a hash of queue and size in order" do
-        Sidekiq.redis do |conn|
-          conn.rpush 'queue:foo', '{}'
-          conn.sadd 'queues', 'foo'
-
-          3.times { conn.rpush 'queue:bar', '{}' }
-          conn.sadd 'queues', 'bar'
-        end
-
-        s = Sidekiq::Stats::Queues.new
-        assert_equal ({ "foo" => 1, "bar" => 3 }), s.lengths
-        assert_equal "bar", s.lengths.first.first
-
-        assert_equal Sidekiq::Stats.new.queues, Sidekiq::Stats::Queues.new.lengths
-      end
-    end
-
-    describe "enqueued" do
-      it "returns total enqueued jobs" do
-        Sidekiq.redis do |conn|
-          conn.flushdb
-          conn.rpush 'queue:foo', '{}'
-          conn.sadd 'queues', 'foo'
-
-          3.times { conn.rpush 'queue:bar', '{}' }
-          conn.sadd 'queues', 'bar'
-        end
-
-        s = Sidekiq::Stats.new
-        assert_equal 4, s.enqueued
-      end
-    end
-
-    describe "over time" do
-      before do
-        @before = DateTime::DATE_FORMATS[:default]
-        DateTime::DATE_FORMATS[:default] = "%d/%m/%Y %H:%M:%S"
-      end
-
-      after do
-        DateTime::DATE_FORMATS[:default] = @before
-      end
-
-      describe "processed" do
-        it 'retrieves hash of dates' do
-          Sidekiq.redis do |c|
-            c.incrby("stat:processed:2012-12-24", 4)
-            c.incrby("stat:processed:2012-12-25", 1)
-            c.incrby("stat:processed:2012-12-26", 6)
-            c.incrby("stat:processed:2012-12-27", 2)
-          end
-          Time.stub(:now, Time.parse("2012-12-26 1:00:00 -0500")) do
-            s = Sidekiq::Stats::History.new(2)
-            assert_equal({ "2012-12-26" => 6, "2012-12-25" => 1 }, s.processed)
-
-            s = Sidekiq::Stats::History.new(3)
-            assert_equal({ "2012-12-26" => 6, "2012-12-25" => 1, "2012-12-24" => 4 }, s.processed)
-
-            s = Sidekiq::Stats::History.new(2, Date.parse("2012-12-25"))
-            assert_equal({ "2012-12-25" => 1, "2012-12-24" => 4 }, s.processed)
-          end
-        end
-      end
-
-      describe "failed" do
-        it 'retrieves hash of dates' do
-          Sidekiq.redis do |c|
-            c.incrby("stat:failed:2012-12-24", 4)
-            c.incrby("stat:failed:2012-12-25", 1)
-            c.incrby("stat:failed:2012-12-26", 6)
-            c.incrby("stat:failed:2012-12-27", 2)
-          end
-          Time.stub(:now, Time.parse("2012-12-26 1:00:00 -0500")) do
-            s = Sidekiq::Stats::History.new(2)
-            assert_equal ({ "2012-12-26" => 6, "2012-12-25" => 1 }), s.failed
-
-            s = Sidekiq::Stats::History.new(3)
-            assert_equal ({ "2012-12-26" => 6, "2012-12-25" => 1, "2012-12-24" => 4 }), s.failed
-
-            s = Sidekiq::Stats::History.new(2, Date.parse("2012-12-25"))
-            assert_equal ({ "2012-12-25" => 1, "2012-12-24" => 4 }), s.failed
-          end
-        end
-      end
-    end
-  end
-
-  describe 'with an empty database' do
-    before do
-      Sidekiq.redis {|c| c.flushdb }
-    end
-
-    it 'shows queue as empty' do
-      q = Sidekiq::Queue.new
-      assert_equal 0, q.size
-      assert_equal 0, q.latency
-    end
-
-    class ApiWorker
-      include Sidekiq::Worker
-    end
-
-    it 'can enumerate jobs' do
-      q = Sidekiq::Queue.new
-      Time.stub(:now, Time.new(2012, 12, 26)) do
-        ApiWorker.perform_async(1, 'mike')
-        assert_equal ['TestApi::ApiWorker'], q.map(&:klass)
-
-        job = q.first
-        assert_equal 24, job.jid.size
-        assert_equal [1, 'mike'], job.args
-        assert_equal Time.new(2012, 12, 26), job.enqueued_at
-      end
-
-      assert q.latency > 10_000_000
-
-      q = Sidekiq::Queue.new('other')
-      assert_equal 0, q.size
-    end
-
-    it 'has no enqueued_at time for jobs enqueued in the future' do
-      job_id = ApiWorker.perform_in(100, 1, 'foo')
-      job = Sidekiq::ScheduledSet.new.find_job(job_id)
-      assert_nil job.enqueued_at
-    end
-
-    it 'unwraps delayed jobs' do
-      Sidekiq::Queue.delay.foo(1,2,3)
-      q = Sidekiq::Queue.new
-      x = q.first
-      assert_equal "Sidekiq::Queue.foo", x.display_class
-      assert_equal [1,2,3], x.display_args
-    end
-
-    it 'can delete jobs' do
-      q = Sidekiq::Queue.new
-      ApiWorker.perform_async(1, 'mike')
-      assert_equal 1, q.size
-
-      x = q.first
-      assert_equal "TestApi::ApiWorker", x.display_class
-      assert_equal [1,'mike'], x.display_args
-
-      assert_equal [true], q.map(&:delete)
-      assert_equal 0, q.size
-    end
-
-    it "can move scheduled job to queue" do
-      remain_id = ApiWorker.perform_in(100, 1, 'jason')
-      job_id = ApiWorker.perform_in(100, 1, 'jason')
-      job = Sidekiq::ScheduledSet.new.find_job(job_id)
-      q = Sidekiq::Queue.new
-      job.add_to_queue
-      queued_job = q.find_job(job_id)
-      refute_nil queued_job
-      assert_equal queued_job.jid, job_id
-      assert_nil Sidekiq::ScheduledSet.new.find_job(job_id)
-      refute_nil Sidekiq::ScheduledSet.new.find_job(remain_id)
-    end
-
-    it "handles multiple scheduled jobs when moving to queue" do
-      jids = Sidekiq::Client.push_bulk('class' => ApiWorker,
-                                       'args' => [[1, 'jason'], [2, 'jason']],
-                                       'at' => Time.now.to_f)
-      assert_equal 2, jids.size
-      (remain_id, job_id) = jids
-      job = Sidekiq::ScheduledSet.new.find_job(job_id)
-      q = Sidekiq::Queue.new
-      job.add_to_queue
-      queued_job = q.find_job(job_id)
-      refute_nil queued_job
-      assert_equal queued_job.jid, job_id
-      assert_nil Sidekiq::ScheduledSet.new.find_job(job_id)
-      refute_nil Sidekiq::ScheduledSet.new.find_job(remain_id)
-    end
-
-    it 'can find job by id in sorted sets' do
-      job_id = ApiWorker.perform_in(100, 1, 'jason')
-      job = Sidekiq::ScheduledSet.new.find_job(job_id)
-      refute_nil job
-      assert_equal job_id, job.jid
-      assert_in_delta job.latency, 0.0, 0.1
-    end
-
-    it 'can remove jobs when iterating over a sorted set' do
-      # scheduled jobs must be greater than SortedSet#each underlying page size
-      51.times do
-        ApiWorker.perform_in(100, 'aaron')
-      end
-      set = Sidekiq::ScheduledSet.new
-      set.map(&:delete)
-      assert_equal set.size, 0
-    end
-
-    it 'can remove jobs when iterating over a queue' do
-      # initial queue size must be greater than Queue#each underlying page size
-      51.times do
-        ApiWorker.perform_async(1, 'aaron')
-      end
-      q = Sidekiq::Queue.new
-      q.map(&:delete)
-      assert_equal q.size, 0
-    end
-
-    it 'can find job by id in queues' do
-      q = Sidekiq::Queue.new
-      job_id = ApiWorker.perform_async(1, 'jason')
-      job = q.find_job(job_id)
-      refute_nil job
-      assert_equal job_id, job.jid
-    end
-
-    it 'can clear a queue' do
-      q = Sidekiq::Queue.new
-      2.times { ApiWorker.perform_async(1, 'mike') }
-      q.clear
-
-      Sidekiq.redis do |conn|
-        refute conn.smembers('queues').include?('foo')
-        refute conn.exists('queue:foo')
-      end
-    end
-
-    it 'can fetch by score' do
-      same_time = Time.now.to_f
-      add_retry('bob1', same_time)
-      add_retry('bob2', same_time)
-      r = Sidekiq::RetrySet.new
-      assert_equal 2, r.fetch(same_time).size
-    end
-
-    it 'can fetch by score and jid' do
-      same_time = Time.now.to_f
-      add_retry('bob1', same_time)
-      add_retry('bob2', same_time)
-      r = Sidekiq::RetrySet.new
-      assert_equal 1, r.fetch(same_time, 'bob1').size
-    end
-
-    it 'shows empty retries' do
-      r = Sidekiq::RetrySet.new
-      assert_equal 0, r.size
-    end
-
-    it 'can enumerate retries' do
-      add_retry
-
-      r = Sidekiq::RetrySet.new
-      assert_equal 1, r.size
-      array = r.to_a
-      assert_equal 1, array.size
-
-      retri = array.first
-      assert_equal 'ApiWorker', retri.klass
-      assert_equal 'default', retri.queue
-      assert_equal 'bob', retri.jid
-      assert_in_delta Time.now.to_f, retri.at.to_f, 0.02
-    end
-
-    it 'requires a jid to delete an entry' do
-      start_time = Time.now.to_f
-      add_retry('bob2', Time.now.to_f)
-      assert_raises(ArgumentError) do
-        Sidekiq::RetrySet.new.delete(start_time)
-      end
-    end
-
-    it 'can delete a single retry from score and jid' do
-      same_time = Time.now.to_f
-      add_retry('bob1', same_time)
-      add_retry('bob2', same_time)
-      r = Sidekiq::RetrySet.new
-      assert_equal 2, r.size
-      Sidekiq::RetrySet.new.delete(same_time, 'bob1')
-      assert_equal 1, r.size
-    end
-
-    it 'can retry a retry' do
-      add_retry
-      r = Sidekiq::RetrySet.new
-      assert_equal 1, r.size
-      r.first.retry
-      assert_equal 0, r.size
-      assert_equal 1, Sidekiq::Queue.new('default').size
-      job = Sidekiq::Queue.new('default').first
-      assert_equal 'bob', job.jid
-      assert_equal 1, job['retry_count']
-    end
-
-    it 'can clear retries' do
-      add_retry
-      add_retry('test')
-      r = Sidekiq::RetrySet.new
-      assert_equal 2, r.size
-      r.clear
-      assert_equal 0, r.size
-    end
-
-    it 'can enumerate processes' do
-      identity_string = "identity_string"
-      odata = {
-        'pid' => 123,
-        'hostname' => Socket.gethostname,
-        'key' => identity_string,
-        'identity' => identity_string,
-        'started_at' => Time.now.to_f - 15,
-      }
-
-      time = Time.now.to_f
-      Sidekiq.redis do |conn|
-        conn.multi do
-          conn.sadd('processes', odata['key'])
-          conn.hmset(odata['key'], 'info', Sidekiq.dump_json(odata), 'busy', 10, 'beat', time)
-          conn.sadd('processes', 'fake:pid')
-        end
-      end
-
-      ps = Sidekiq::ProcessSet.new.to_a
-      assert_equal 1, ps.size
-      data = ps.first
-      assert_equal 10, data['busy']
-      assert_equal time, data['beat']
-      assert_equal 123, data['pid']
-      data.quiet!
-      data.stop!
-      signals_string = "#{odata['key']}-signals"
-      assert_equal "TERM", Sidekiq.redis{|c| c.lpop(signals_string) }
-      assert_equal "USR1", Sidekiq.redis{|c| c.lpop(signals_string) }
-    end
-
-    it 'can enumerate workers' do
-      w = Sidekiq::Workers.new
-      assert_equal 0, w.size
-      w.each do
-        assert false
-      end
-
-      hn = Socket.gethostname
-      key = "#{hn}:#{$$}"
-      pdata = { 'pid' => $$, 'hostname' => hn, 'started_at' => Time.now.to_i }
-      Sidekiq.redis do |conn|
-        conn.sadd('processes', key)
-        conn.hmset(key, 'info', Sidekiq.dump_json(pdata), 'busy', 0, 'beat', Time.now.to_f)
-      end
-
-      s = "#{key}:workers"
-      data = Sidekiq.dump_json({ 'payload' => {}, 'queue' => 'default', 'run_at' => Time.now.to_i })
-      Sidekiq.redis do |c|
-        c.hmset(s, '1234', data)
-      end
-
-      w.each do |p, x, y|
-        assert_equal key, p
-        assert_equal "1234", x
-        assert_equal 'default', y['queue']
-        assert_equal Time.now.year, Time.at(y['run_at']).year
-      end
-
-      s = "#{key}:workers"
-      data = Sidekiq.dump_json({ 'payload' => {}, 'queue' => 'default', 'run_at' => (Time.now.to_i - 2*60*60) })
-      Sidekiq.redis do |c|
-        c.multi do
-          c.hmset(s, '5678', data)
-          c.hmset("b#{s}", '5678', data)
-        end
-      end
-
-      assert_equal ['1234', '5678'], w.map { |_, tid, _| tid }
-    end
-
-    it 'can reschedule jobs' do
-      add_retry('foo1')
-      add_retry('foo2')
-
-      retries = Sidekiq::RetrySet.new
-      assert_equal 2, retries.size
-      refute(retries.map { |r| r.score > (Time.now.to_f + 9) }.any?)
-
-      retries.each do |retri|
-        retri.reschedule(Time.now.to_f + 10) if retri.jid == 'foo2'
-      end
-
-      assert_equal 2, retries.size
-      assert(retries.map { |r| r.score > (Time.now.to_f + 9) }.any?)
-    end
-
-    it 'prunes processes which have died' do
-      data = { 'pid' => rand(10_000), 'hostname' => "app#{rand(1_000)}", 'started_at' => Time.now.to_f }
-      key = "#{data['hostname']}:#{data['pid']}"
-      Sidekiq.redis do |conn|
-        conn.sadd('processes', key)
-        conn.hmset(key, 'info', Sidekiq.dump_json(data), 'busy', 0, 'beat', Time.now.to_f)
-      end
-
-      ps = Sidekiq::ProcessSet.new
-      assert_equal 1, ps.size
-      assert_equal 1, ps.to_a.size
-
-      Sidekiq.redis do |conn|
-        conn.sadd('processes', "bar:987")
-        conn.sadd('processes', "bar:986")
-      end
-
-      ps = Sidekiq::ProcessSet.new
-      assert_equal 1, ps.size
-      assert_equal 1, ps.to_a.size
-    end
-
-    def add_retry(jid = 'bob', at = Time.now.to_f)
-      payload = Sidekiq.dump_json('class' => 'ApiWorker', 'args' => [1, 'mike'], 'queue' => 'default', 'jid' => jid, 'retry_count' => 2, 'failed_at' => Time.now.to_f)
-      Sidekiq.redis do |conn|
-        conn.zadd('retry', at.to_s, payload)
-      end
-    end
-  end
-end