roundhouse-x 0.1.0
- checksums.yaml +7 -0
- data/.gitignore +12 -0
- data/.travis.yml +16 -0
- data/3.0-Upgrade.md +70 -0
- data/Changes.md +1127 -0
- data/Gemfile +27 -0
- data/LICENSE +7 -0
- data/README.md +52 -0
- data/Rakefile +9 -0
- data/bin/roundhouse +19 -0
- data/bin/roundhousectl +93 -0
- data/lib/generators/roundhouse/templates/worker.rb.erb +9 -0
- data/lib/generators/roundhouse/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/roundhouse/templates/worker_test.rb.erb +8 -0
- data/lib/generators/roundhouse/worker_generator.rb +49 -0
- data/lib/roundhouse/actor.rb +39 -0
- data/lib/roundhouse/api.rb +859 -0
- data/lib/roundhouse/cli.rb +396 -0
- data/lib/roundhouse/client.rb +210 -0
- data/lib/roundhouse/core_ext.rb +105 -0
- data/lib/roundhouse/exception_handler.rb +30 -0
- data/lib/roundhouse/fetch.rb +154 -0
- data/lib/roundhouse/launcher.rb +98 -0
- data/lib/roundhouse/logging.rb +104 -0
- data/lib/roundhouse/manager.rb +236 -0
- data/lib/roundhouse/middleware/chain.rb +149 -0
- data/lib/roundhouse/middleware/i18n.rb +41 -0
- data/lib/roundhouse/middleware/server/active_record.rb +13 -0
- data/lib/roundhouse/middleware/server/logging.rb +40 -0
- data/lib/roundhouse/middleware/server/retry_jobs.rb +206 -0
- data/lib/roundhouse/monitor.rb +124 -0
- data/lib/roundhouse/paginator.rb +42 -0
- data/lib/roundhouse/processor.rb +159 -0
- data/lib/roundhouse/rails.rb +24 -0
- data/lib/roundhouse/redis_connection.rb +77 -0
- data/lib/roundhouse/scheduled.rb +115 -0
- data/lib/roundhouse/testing/inline.rb +28 -0
- data/lib/roundhouse/testing.rb +193 -0
- data/lib/roundhouse/util.rb +68 -0
- data/lib/roundhouse/version.rb +3 -0
- data/lib/roundhouse/web.rb +264 -0
- data/lib/roundhouse/web_helpers.rb +249 -0
- data/lib/roundhouse/worker.rb +90 -0
- data/lib/roundhouse.rb +177 -0
- data/roundhouse.gemspec +27 -0
- data/test/config.yml +9 -0
- data/test/env_based_config.yml +11 -0
- data/test/fake_env.rb +0 -0
- data/test/fixtures/en.yml +2 -0
- data/test/helper.rb +49 -0
- data/test/test_api.rb +521 -0
- data/test/test_cli.rb +389 -0
- data/test/test_client.rb +294 -0
- data/test/test_exception_handler.rb +55 -0
- data/test/test_fetch.rb +206 -0
- data/test/test_logging.rb +34 -0
- data/test/test_manager.rb +169 -0
- data/test/test_middleware.rb +160 -0
- data/test/test_monitor.rb +258 -0
- data/test/test_processor.rb +176 -0
- data/test/test_rails.rb +23 -0
- data/test/test_redis_connection.rb +127 -0
- data/test/test_retry.rb +390 -0
- data/test/test_roundhouse.rb +87 -0
- data/test/test_scheduled.rb +120 -0
- data/test/test_scheduling.rb +75 -0
- data/test/test_testing.rb +78 -0
- data/test/test_testing_fake.rb +240 -0
- data/test/test_testing_inline.rb +65 -0
- data/test/test_util.rb +18 -0
- data/test/test_web.rb +605 -0
- data/test/test_web_helpers.rb +52 -0
- data/web/assets/images/bootstrap/glyphicons-halflings-white.png +0 -0
- data/web/assets/images/bootstrap/glyphicons-halflings.png +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status/active.png +0 -0
- data/web/assets/images/status/idle.png +0 -0
- data/web/assets/images/status-sd8051fd480.png +0 -0
- data/web/assets/javascripts/application.js +83 -0
- data/web/assets/javascripts/dashboard.js +300 -0
- data/web/assets/javascripts/locales/README.md +27 -0
- data/web/assets/javascripts/locales/jquery.timeago.ar.js +96 -0
- data/web/assets/javascripts/locales/jquery.timeago.bg.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.bs.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.ca.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.cs.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.cy.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.da.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.de.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.el.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.en-short.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.en.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.es.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.et.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.fa.js +22 -0
- data/web/assets/javascripts/locales/jquery.timeago.fi.js +28 -0
- data/web/assets/javascripts/locales/jquery.timeago.fr-short.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.fr.js +17 -0
- data/web/assets/javascripts/locales/jquery.timeago.he.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.hr.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.hu.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.hy.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.id.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.it.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.ja.js +19 -0
- data/web/assets/javascripts/locales/jquery.timeago.ko.js +17 -0
- data/web/assets/javascripts/locales/jquery.timeago.lt.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.mk.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.nl.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.no.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.pl.js +31 -0
- data/web/assets/javascripts/locales/jquery.timeago.pt-br.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.pt.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.ro.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.rs.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.ru.js +34 -0
- data/web/assets/javascripts/locales/jquery.timeago.sk.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.sl.js +44 -0
- data/web/assets/javascripts/locales/jquery.timeago.sv.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.th.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.tr.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.uk.js +34 -0
- data/web/assets/javascripts/locales/jquery.timeago.uz.js +19 -0
- data/web/assets/javascripts/locales/jquery.timeago.zh-cn.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.zh-tw.js +20 -0
- data/web/assets/stylesheets/application.css +746 -0
- data/web/assets/stylesheets/bootstrap.css +9 -0
- data/web/locales/cs.yml +68 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +69 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +77 -0
- data/web/locales/es.yml +69 -0
- data/web/locales/fr.yml +69 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +69 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/no.yml +69 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +75 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +22 -0
- data/web/views/_job_info.erb +84 -0
- data/web/views/_nav.erb +66 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_js.erb +5 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +90 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +31 -0
- data/web/views/morgue.erb +71 -0
- data/web/views/queue.erb +45 -0
- data/web/views/queues.erb +27 -0
- data/web/views/retries.erb +74 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +54 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +404 -0
data/lib/roundhouse/processor.rb
@@ -0,0 +1,159 @@
+require 'roundhouse/util'
+require 'roundhouse/actor'
+require 'roundhouse/monitor'
+
+require 'roundhouse/middleware/server/retry_jobs'
+require 'roundhouse/middleware/server/logging'
+
+module Roundhouse
+  ##
+  # The Processor receives a message from the Manager and actually
+  # processes it. It instantiates the worker, runs the middleware
+  # chain and then calls Roundhouse::Worker#perform.
+  class Processor
+    # To prevent a memory leak, ensure that stats expire. However, they should take up a minimal amount of storage
+    # so keep them around for a long time
+    STATS_TIMEOUT = 24 * 60 * 60 * 365 * 5
+
+    include Util
+    include Actor
+
+    def self.default_middleware
+      Middleware::Chain.new do |m|
+        m.add Middleware::Server::Logging
+        m.add Middleware::Server::RetryJobs
+        if defined?(::ActiveRecord::Base)
+          require 'roundhouse/middleware/server/active_record'
+          m.add Roundhouse::Middleware::Server::ActiveRecord
+        end
+      end
+    end
+
+    attr_accessor :proxy_id
+
+    def initialize(boss)
+      @boss = boss
+    end
+
+    def process(work)
+      msgstr = work.message
+      queue = work.queue_id
+
+      @boss.async.real_thread(proxy_id, Thread.current)
+
+      ack = true
+      begin
+        msg = Roundhouse.load_json(msgstr)
+        klass = msg['class'].constantize
+        worker = klass.new
+        worker.jid = msg['jid']
+
+        stats(worker, msg, queue) do
+          Roundhouse.server_middleware.invoke(worker, msg, queue) do
+            execute_job(worker, cloned(msg['args']))
+          end
+        end
+
+        # Put the queue back into rotation
+        Roundhouse.redis { |conn| Roundhouse::Monitor.push(conn, queue) }
+      rescue Roundhouse::Shutdown
+        # Had to force kill this job because it didn't finish
+        # within the timeout. Don't acknowledge the work since
+        # we didn't properly finish it.
+        ack = false
+      rescue Exception => ex
+        handle_exception(ex, msg || { :message => msgstr })
+        raise
+      ensure
+        work.acknowledge if ack
+      end
+
+      @boss.async.processor_done(current_actor)
+    end
+
+    def inspect
+      "<Processor##{object_id.to_s(16)}>"
+    end
+
+    def execute_job(worker, cloned_args)
+      worker.perform(*cloned_args)
+    end
+
+    private
+
+    def thread_identity
+      @str ||= Thread.current.object_id.to_s(36)
+    end
+
+    def stats(worker, msg, queue)
+      # Do not conflate errors from the job with errors caused by updating
+      # stats so calling code can react appropriately
+      retry_and_suppress_exceptions do
+        hash = Roundhouse.dump_json({:queue => queue, :payload => msg, :run_at => Time.now.to_i })
+        Roundhouse.redis do |conn|
+          conn.multi do
+            conn.hmset("#{identity}:workers", thread_identity, hash)
+            conn.expire("#{identity}:workers", 60*60*4)
+          end
+        end
+      end
+
+      begin
+        yield
+      rescue Exception
+        retry_and_suppress_exceptions do
+          failed = "stat:failed:#{Time.now.utc.to_date}"
+          Roundhouse.redis do |conn|
+            conn.multi do
+              conn.incrby("stat:failed", 1)
+              conn.incrby(failed, 1)
+              conn.expire(failed, STATS_TIMEOUT)
+            end
+          end
+        end
+        raise
+      ensure
+        retry_and_suppress_exceptions do
+          processed = "stat:processed:#{Time.now.utc.to_date}"
+          Roundhouse.redis do |conn|
+            conn.multi do
+              conn.hdel("#{identity}:workers", thread_identity)
+              conn.incrby("stat:processed", 1)
+              conn.incrby(processed, 1)
+              conn.expire(processed, STATS_TIMEOUT)
+            end
+          end
+        end
+      end
+    end
+
+    # Deep clone the arguments passed to the worker so that if
+    # the message fails, what is pushed back onto Redis hasn't
+    # been mutated by the worker.
+    def cloned(ary)
+      Marshal.load(Marshal.dump(ary))
+    end
+
+    # If an exception occurs in the block passed to this method, that block will be retried up to max_retries times.
+    # All exceptions will be swallowed and logged.
+    def retry_and_suppress_exceptions(max_retries = 5)
+      retry_count = 0
+      begin
+        yield
+      rescue => e
+        retry_count += 1
+        if retry_count <= max_retries
+          Roundhouse.logger.debug {"Suppressing and retrying error: #{e.inspect}"}
+          pause_for_recovery(retry_count)
+          retry
+        else
+          handle_exception(e, { :message => "Exhausted #{max_retries} retries"})
+        end
+      end
+    end
+
+    def pause_for_recovery(retry_count)
+      sleep(retry_count)
+    end
+  end
+end
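The stats bookkeeping above never lets a Redis hiccup take down a job: every stat update runs through retry_and_suppress_exceptions, which retries the block with a linearly growing sleep and, once retries are exhausted, logs the error instead of raising. A minimal standalone sketch of that pattern (the name with_suppressed_retries is illustrative, not part of the gem):

    # Illustrative re-statement of the retry/suppress pattern above.
    def with_suppressed_retries(max_retries = 5)
      retry_count = 0
      begin
        yield
      rescue => e
        retry_count += 1
        if retry_count <= max_retries
          sleep(retry_count) # back off 1s, 2s, 3s, ...
          retry
        else
          warn "Exhausted #{max_retries} retries: #{e.inspect}"
        end
      end
    end

    # Stat updates survive transient Redis errors without failing the job:
    with_suppressed_retries { raise 'transient failure' if rand < 0.5 }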
data/lib/roundhouse/rails.rb
@@ -0,0 +1,24 @@
+module Roundhouse
+  def self.hook_rails!
+    # no op
+    # This may be completely removed in the future
+    # Those extensions make sense for Sidekiq, but do
+    # not make sense for Roundhouse
+  end
+
+  # Removes the generic aliases which MAY clash with names of already
+  # created methods by other applications. The methods `roundhouse_delay`,
+  # `roundhouse_delay_for` and `roundhouse_delay_until` can be used instead.
+  def self.remove_delay!
+    # no op
+    # This may be completely removed in the future
+    # Those extensions make sense for Sidekiq, but do
+    # not make sense for Roundhouse
+  end
+
+  class Rails < ::Rails::Engine
+    initializer 'roundhouse' do
+      Roundhouse.hook_rails!
+    end
+  end if defined?(::Rails)
+end
data/lib/roundhouse/redis_connection.rb
@@ -0,0 +1,77 @@
+require 'connection_pool'
+require 'redis'
+require 'uri'
+
+module Roundhouse
+  class RedisConnection
+    class << self
+
+      def create(options={})
+        options[:url] ||= determine_redis_provider
+
+        # need a connection for Fetcher and Retry
+        size = options[:size] || (Roundhouse.server? ? (Roundhouse.options[:concurrency] + 2) : 5)
+        pool_timeout = options[:pool_timeout] || 1
+
+        log_info(options)
+
+        ConnectionPool.new(:timeout => pool_timeout, :size => size) do
+          build_client(options)
+        end
+      end
+
+      private
+
+      def build_client(options)
+        namespace = options[:namespace]
+
+        client = Redis.new client_opts(options)
+        if namespace
+          require 'redis/namespace'
+          Redis::Namespace.new(namespace, :redis => client)
+        else
+          client
+        end
+      end
+
+      def client_opts(options)
+        opts = options.dup
+        if opts[:namespace]
+          opts.delete(:namespace)
+        end
+
+        if opts[:network_timeout]
+          opts[:timeout] = opts[:network_timeout]
+          opts.delete(:network_timeout)
+        end
+
+        opts[:driver] = opts[:driver] || 'ruby'
+
+        opts
+      end
+
+      def log_info(options)
+        # Don't log Redis AUTH password
+        redacted = "REDACTED"
+        scrubbed_options = options.dup
+        if scrubbed_options[:url] && (uri = URI.parse(scrubbed_options[:url])) && uri.password
+          uri.password = redacted
+          scrubbed_options[:url] = uri.to_s
+        end
+        if scrubbed_options[:password]
+          scrubbed_options[:password] = redacted
+        end
+        if Roundhouse.server?
+          Roundhouse.logger.info("Booting Roundhouse #{Roundhouse::VERSION} with redis options #{scrubbed_options}")
+        else
+          Roundhouse.logger.debug("#{Roundhouse::NAME} client with redis options #{scrubbed_options}")
+        end
+      end
+
+      def determine_redis_provider
+        ENV[ENV['REDIS_PROVIDER'] || 'REDIS_URL']
+      end
+
+    end
+  end
+end
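For illustration, a hedged sketch of how create composes the options handled above. The URL and sizes are placeholders, and pool.with is the standard connection_pool checkout API:

    require 'roundhouse/redis_connection'

    pool = Roundhouse::RedisConnection.create(
      :url             => 'redis://localhost:6379/0', # or via REDIS_URL/REDIS_PROVIDER
      :namespace       => 'roundhouse',  # wraps the client in Redis::Namespace
      :size            => 10,            # connection pool size
      :pool_timeout    => 1,             # seconds to wait for a free connection
      :network_timeout => 5              # becomes the redis client's :timeout
    )

    pool.with { |conn| conn.ping } # => "PONG"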
data/lib/roundhouse/scheduled.rb
@@ -0,0 +1,115 @@
+require 'roundhouse'
+require 'roundhouse/util'
+require 'roundhouse/actor'
+require 'roundhouse/api'
+
+module Roundhouse
+  module Scheduled
+    SETS = %w(retry schedule)
+
+    class Enq
+      def enqueue_jobs(now=Time.now.to_f.to_s, sorted_sets=SETS)
+        # A job's "score" in Redis is the time at which it should be processed.
+        # Just check Redis for the set of jobs with a timestamp before now.
+        Roundhouse.redis do |conn|
+          sorted_sets.each do |sorted_set|
+            # Get the next item in the queue if its score (time to execute) is <= now.
+            # We need to go through the list one at a time to reduce the risk of something
+            # going wrong between the time jobs are popped from the scheduled queue and when
+            # they are pushed onto a work queue and losing the jobs.
+            while job = conn.zrangebyscore(sorted_set, '-inf', now, :limit => [0, 1]).first do
+
+              # Pop item off the queue and add it to the work queue. If the job can't be popped from
+              # the queue, it's because another process already popped it so we can move on to the
+              # next one.
+              if conn.zrem(sorted_set, job)
+                Roundhouse::Client.push(Roundhouse.load_json(job))
+                Roundhouse::Logging.logger.debug { "enqueued #{sorted_set}: #{job}" }
+              end
+            end
+          end
+        end
+      end
+    end
+
+    ##
+    # The Poller checks Redis every N seconds for jobs in the retry or scheduled
+    # set that have passed their timestamp and should be enqueued. If so, it
+    # just pops the job back onto its original queue so the
+    # workers can pick it up like any other job.
+    class Poller
+      include Util
+      include Actor
+
+      INITIAL_WAIT = 10
+
+      def initialize
+        @enq = (Roundhouse.options[:scheduled_enq] || Roundhouse::Scheduled::Enq).new
+      end
+
+      def poll(first_time=false)
+        watchdog('scheduling poller thread died!') do
+          initial_wait if first_time
+
+          begin
+            @enq.enqueue_jobs
+          rescue => ex
+            # Most likely a problem with redis networking.
+            # Punt and try again at the next interval
+            logger.error ex.message
+            logger.error ex.backtrace.first
+          end
+
+          after(random_poll_interval) { poll }
+        end
+      end
+
+      private
+
+      # Calculates a random interval that is ±50% of the desired average.
+      def random_poll_interval
+        poll_interval_average * rand + poll_interval_average.to_f / 2
+      end
+
+      # We do our best to tune the poll interval to the size of the active Roundhouse
+      # cluster. If you have 30 processes and poll every 15 seconds, that means one
+      # Roundhouse is checking Redis every 0.5 seconds - way too often for most people
+      # and really bad if the retry or scheduled sets are large.
+      #
+      # Instead try to avoid polling more than once every 15 seconds. If you have
+      # 30 Roundhouse processes, we'll poll every 30 * 15 or 450 seconds.
+      # To keep things statistically random, we'll sleep a random amount between
+      # 225 and 675 seconds for each poll or 450 seconds on average. Otherwise restarting
+      # all your Roundhouse processes at the same time will lead to them all polling at
+      # the same time: the thundering herd problem.
+      #
+      # We only do this if poll_interval is unset (the default).
+      def poll_interval_average
+        Roundhouse.options[:poll_interval_average] ||= scaled_poll_interval
+      end
+
+      # Calculates an average poll interval based on the number of known Roundhouse processes.
+      # This minimizes a single point of failure by dispersing check-ins but without taxing
+      # Redis if you run many Roundhouse processes.
+      def scaled_poll_interval
+        pcount = Roundhouse::ProcessSet.new.size
+        pcount = 1 if pcount == 0
+        pcount * Roundhouse.options[:average_scheduled_poll_interval]
+      end
+
+      def initial_wait
+        begin
+          # Have all processes sleep between 5-15 seconds. 10 seconds
+          # to give time for the heartbeat to register (if the poll interval is going to be calculated by the number
+          # of workers), and 5 random seconds to ensure they don't all hit Redis at the same time.
+          sleep(INITIAL_WAIT) unless Roundhouse.options[:poll_interval_average]
+          sleep(5 * rand)
+        rescue Celluloid::TaskTerminated
+          # Hit Ctrl-C when Roundhouse is finished booting and we have a chance
+          # to get here.
+        end
+      end
+
+    end
+  end
+end
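To make the interval math above concrete, here is a worked example using the 30-process, 15-second scenario from the comments (plain arithmetic, not gem API):

    average_scheduled_poll_interval = 15   # seconds per process
    process_count                   = 30

    poll_interval_average = process_count * average_scheduled_poll_interval
    # => 450 seconds

    # random_poll_interval picks uniformly within ±50% of that average:
    interval = poll_interval_average * rand + poll_interval_average / 2.0
    # => between 225.0 and 675.0 seconds, averaging 450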
data/lib/roundhouse/testing/inline.rb
@@ -0,0 +1,28 @@
+require 'roundhouse/testing'
+
+##
+# The Roundhouse inline infrastructure overrides perform_async so that it
+# actually calls perform instead. This allows workers to be run inline in a
+# testing environment.
+#
+# This is similar to `Resque.inline = true` functionality.
+#
+# Example:
+#
+#   require 'roundhouse/testing/inline'
+#
+#   $external_variable = 0
+#
+#   class ExternalWorker
+#     include Roundhouse::Worker
+#
+#     def perform
+#       $external_variable = 1
+#     end
+#   end
+#
+#   assert_equal 0, $external_variable
+#   ExternalWorker.perform_async
+#   assert_equal 1, $external_variable
+#
+Roundhouse::Testing.inline!
data/lib/roundhouse/testing.rb
@@ -0,0 +1,193 @@
+require 'securerandom'
+require 'roundhouse'
+
+module Roundhouse
+
+  class Testing
+    class << self
+      attr_accessor :__test_mode
+
+      def __set_test_mode(mode)
+        if block_given?
+          current_mode = self.__test_mode
+          begin
+            self.__test_mode = mode
+            yield
+          ensure
+            self.__test_mode = current_mode
+          end
+        else
+          self.__test_mode = mode
+        end
+      end
+
+      def disable!(&block)
+        __set_test_mode(:disable, &block)
+      end
+
+      def fake!(&block)
+        __set_test_mode(:fake, &block)
+      end
+
+      def inline!(&block)
+        __set_test_mode(:inline, &block)
+      end
+
+      def enabled?
+        self.__test_mode != :disable
+      end
+
+      def disabled?
+        self.__test_mode == :disable
+      end
+
+      def fake?
+        self.__test_mode == :fake
+      end
+
+      def inline?
+        self.__test_mode == :inline
+      end
+    end
+  end
+
+  # Default to fake testing to keep old behavior
+  Roundhouse::Testing.fake!
+
+  class EmptyQueueError < RuntimeError; end
+
+  class Client
+    alias_method :raw_push_real, :raw_push
+
+    def raw_push(payloads)
+      if Roundhouse::Testing.fake?
+        payloads.each do |job|
+          job['class'].constantize.jobs << Roundhouse.load_json(Roundhouse.dump_json(job))
+        end
+        true
+      elsif Roundhouse::Testing.inline?
+        payloads.each do |job|
+          job['jid'] ||= SecureRandom.hex(12)
+          klass = job['class'].constantize
+          klass.jobs.unshift Roundhouse.load_json(Roundhouse.dump_json(job))
+          klass.perform_one
+        end
+        true
+      else
+        raw_push_real(payloads)
+      end
+    end
+  end
+
+  module Worker
+    ##
+    # The Roundhouse testing infrastructure overrides perform_async
+    # so that it does not actually touch the network. Instead it
+    # stores the asynchronous jobs in a per-class array so that
+    # their presence/absence can be asserted by your tests.
+    #
+    # This is similar to ActionMailer's :test delivery_method and its
+    # ActionMailer::Base.deliveries array.
+    #
+    # Example:
+    #
+    #   require 'roundhouse/testing'
+    #
+    #   assert_equal 0, HardWorker.jobs.size
+    #   HardWorker.perform_async(:something)
+    #   assert_equal 1, HardWorker.jobs.size
+    #   assert_equal :something, HardWorker.jobs[0]['args'][0]
+    #
+    #   assert_equal 0, Roundhouse::Extensions::DelayedMailer.jobs.size
+    #   MyMailer.delay.send_welcome_email('foo@example.com')
+    #   assert_equal 1, Roundhouse::Extensions::DelayedMailer.jobs.size
+    #
+    # You can also clear and drain all workers' jobs:
+    #
+    #   assert_equal 0, Roundhouse::Extensions::DelayedMailer.jobs.size
+    #   assert_equal 0, Roundhouse::Extensions::DelayedModel.jobs.size
+    #
+    #   MyMailer.delay.send_welcome_email('foo@example.com')
+    #   MyModel.delay.do_something_hard
+    #
+    #   assert_equal 1, Roundhouse::Extensions::DelayedMailer.jobs.size
+    #   assert_equal 1, Roundhouse::Extensions::DelayedModel.jobs.size
+    #
+    #   Roundhouse::Worker.clear_all # or .drain_all
+    #
+    #   assert_equal 0, Roundhouse::Extensions::DelayedMailer.jobs.size
+    #   assert_equal 0, Roundhouse::Extensions::DelayedModel.jobs.size
+    #
+    # This can be useful to make sure jobs don't linger between tests:
+    #
+    #   RSpec.configure do |config|
+    #     config.before(:each) do
+    #       Roundhouse::Worker.clear_all
+    #     end
+    #   end
+    #
+    # or for acceptance testing, i.e. with cucumber:
+    #
+    #   AfterStep do
+    #     Roundhouse::Worker.drain_all
+    #   end
+    #
+    #   When I sign up as "foo@example.com"
+    #   Then I should receive a welcome email to "foo@example.com"
+    #
+    module ClassMethods
+
+      # Jobs queued for this worker
+      def jobs
+        Worker.jobs[self]
+      end
+
+      # Clear all jobs for this worker
+      def clear
+        jobs.clear
+      end
+
+      # Drain and run all jobs for this worker
+      def drain
+        while job = jobs.shift do
+          worker = new
+          worker.jid = job['jid']
+          worker.bid = job['bid'] if worker.respond_to?(:bid=)
+          execute_job(worker, job['args'])
+        end
+      end
+
+      # Pop out a single job and perform it
+      def perform_one
+        raise(EmptyQueueError, "perform_one called with empty job queue") if jobs.empty?
+        job = jobs.shift
+        worker = new
+        worker.jid = job['jid']
+        worker.bid = job['bid'] if worker.respond_to?(:bid=)
+        execute_job(worker, job['args'])
+      end
+
+      def execute_job(worker, args)
+        worker.perform(*args)
+      end
+    end
+
+    class << self
+      def jobs # :nodoc:
+        @jobs ||= Hash.new { |hash, key| hash[key] = [] }
+      end
+
+      # Clear all queued jobs across all workers
+      def clear_all
+        jobs.clear
+      end
+
+      # Drain all queued jobs across all workers
+      def drain_all
+        until jobs.values.all?(&:empty?) do
+          jobs.keys.each(&:drain)
+        end
+      end
+    end
+  end
+end
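One detail worth noting in __set_test_mode above: every mode switcher accepts an optional block, in which case the previous mode is restored on exit. A hedged usage sketch, assuming a HardWorker class that includes Roundhouse::Worker as in the docstring:

    require 'roundhouse/testing' # defaults to fake mode

    Roundhouse::Testing.inline! do
      HardWorker.perform_async(1) # runs HardWorker#perform(1) immediately
    end

    Roundhouse::Testing.fake?    # => true again outside the block
    HardWorker.perform_async(2)  # queued in HardWorker.jobs, not executed
    HardWorker.drain             # now runs the queued job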
data/lib/roundhouse/util.rb
@@ -0,0 +1,68 @@
+require 'socket'
+require 'securerandom'
+require 'roundhouse/exception_handler'
+require 'roundhouse/core_ext'
+
+module Roundhouse
+  ##
+  # This module is part of Roundhouse core and not intended for extensions.
+  #
+  module Util
+    include ExceptionHandler
+
+    EXPIRY = 60 * 60 * 24
+
+    def watchdog(last_words)
+      yield
+    rescue Exception => ex
+      handle_exception(ex, { context: last_words })
+      raise ex
+    end
+
+    def logger
+      Roundhouse.logger
+    end
+
+    def redis(&block)
+      Roundhouse.redis(&block)
+    end
+
+    def hostname
+      ENV['DYNO'] || Socket.gethostname
+    end
+
+    def process_nonce
+      @@process_nonce ||= SecureRandom.hex(6)
+    end
+
+    def identity
+      @@identity ||= "#{hostname}:#{$$}:#{process_nonce}"
+    end
+
+    def fire_event(event, reverse=false)
+      arr = Roundhouse.options[:lifecycle_events][event]
+      arr.reverse! if reverse
+      arr.each do |block|
+        begin
+          block.call
+        rescue => ex
+          handle_exception(ex, { event: event })
+        end
+      end
+    end
+
+    def want_a_hertz_donut?
+      # what's a hertz donut?
+      # punch! Hurts, don't it?
+      info = Roundhouse.redis {|c| c.info }
+      if info['connected_clients'].to_i > 1000 && info['hz'].to_i >= 10
+        Roundhouse.logger.warn { "Your Redis `hz` setting is too high at #{info['hz']}. See mperham/sidekiq#2431. Set it to 3 in #{info['config_file']}" }
+        true
+      else
+        Roundhouse.logger.debug { "Redis hz: #{info['hz']}. Client count: #{info['connected_clients']}" }
+        false
+      end
+    end
+
+  end
+end
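A brief sketch of how these helpers are meant to be mixed in. MyComponent is a hypothetical includer, and the identity value shown is only an example of the hostname:pid:nonce format built above:

    require 'roundhouse/util'

    # Hypothetical class mixing in the Util helpers.
    class MyComponent
      include Roundhouse::Util
    end

    c = MyComponent.new
    c.identity  # => e.g. "myhost:12345:a1b2c3d4e5f6"

    # watchdog reports the exception with the given context, then re-raises:
    c.watchdog('my component died') do
      # work that must never fail silently
    end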