roundhouse-x 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.gitignore +12 -0
- data/.travis.yml +16 -0
- data/3.0-Upgrade.md +70 -0
- data/Changes.md +1127 -0
- data/Gemfile +27 -0
- data/LICENSE +7 -0
- data/README.md +52 -0
- data/Rakefile +9 -0
- data/bin/roundhouse +19 -0
- data/bin/roundhousectl +93 -0
- data/lib/generators/roundhouse/templates/worker.rb.erb +9 -0
- data/lib/generators/roundhouse/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/roundhouse/templates/worker_test.rb.erb +8 -0
- data/lib/generators/roundhouse/worker_generator.rb +49 -0
- data/lib/roundhouse/actor.rb +39 -0
- data/lib/roundhouse/api.rb +859 -0
- data/lib/roundhouse/cli.rb +396 -0
- data/lib/roundhouse/client.rb +210 -0
- data/lib/roundhouse/core_ext.rb +105 -0
- data/lib/roundhouse/exception_handler.rb +30 -0
- data/lib/roundhouse/fetch.rb +154 -0
- data/lib/roundhouse/launcher.rb +98 -0
- data/lib/roundhouse/logging.rb +104 -0
- data/lib/roundhouse/manager.rb +236 -0
- data/lib/roundhouse/middleware/chain.rb +149 -0
- data/lib/roundhouse/middleware/i18n.rb +41 -0
- data/lib/roundhouse/middleware/server/active_record.rb +13 -0
- data/lib/roundhouse/middleware/server/logging.rb +40 -0
- data/lib/roundhouse/middleware/server/retry_jobs.rb +206 -0
- data/lib/roundhouse/monitor.rb +124 -0
- data/lib/roundhouse/paginator.rb +42 -0
- data/lib/roundhouse/processor.rb +159 -0
- data/lib/roundhouse/rails.rb +24 -0
- data/lib/roundhouse/redis_connection.rb +77 -0
- data/lib/roundhouse/scheduled.rb +115 -0
- data/lib/roundhouse/testing/inline.rb +28 -0
- data/lib/roundhouse/testing.rb +193 -0
- data/lib/roundhouse/util.rb +68 -0
- data/lib/roundhouse/version.rb +3 -0
- data/lib/roundhouse/web.rb +264 -0
- data/lib/roundhouse/web_helpers.rb +249 -0
- data/lib/roundhouse/worker.rb +90 -0
- data/lib/roundhouse.rb +177 -0
- data/roundhouse.gemspec +27 -0
- data/test/config.yml +9 -0
- data/test/env_based_config.yml +11 -0
- data/test/fake_env.rb +0 -0
- data/test/fixtures/en.yml +2 -0
- data/test/helper.rb +49 -0
- data/test/test_api.rb +521 -0
- data/test/test_cli.rb +389 -0
- data/test/test_client.rb +294 -0
- data/test/test_exception_handler.rb +55 -0
- data/test/test_fetch.rb +206 -0
- data/test/test_logging.rb +34 -0
- data/test/test_manager.rb +169 -0
- data/test/test_middleware.rb +160 -0
- data/test/test_monitor.rb +258 -0
- data/test/test_processor.rb +176 -0
- data/test/test_rails.rb +23 -0
- data/test/test_redis_connection.rb +127 -0
- data/test/test_retry.rb +390 -0
- data/test/test_roundhouse.rb +87 -0
- data/test/test_scheduled.rb +120 -0
- data/test/test_scheduling.rb +75 -0
- data/test/test_testing.rb +78 -0
- data/test/test_testing_fake.rb +240 -0
- data/test/test_testing_inline.rb +65 -0
- data/test/test_util.rb +18 -0
- data/test/test_web.rb +605 -0
- data/test/test_web_helpers.rb +52 -0
- data/web/assets/images/bootstrap/glyphicons-halflings-white.png +0 -0
- data/web/assets/images/bootstrap/glyphicons-halflings.png +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status/active.png +0 -0
- data/web/assets/images/status/idle.png +0 -0
- data/web/assets/images/status-sd8051fd480.png +0 -0
- data/web/assets/javascripts/application.js +83 -0
- data/web/assets/javascripts/dashboard.js +300 -0
- data/web/assets/javascripts/locales/README.md +27 -0
- data/web/assets/javascripts/locales/jquery.timeago.ar.js +96 -0
- data/web/assets/javascripts/locales/jquery.timeago.bg.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.bs.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.ca.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.cs.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.cy.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.da.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.de.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.el.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.en-short.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.en.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.es.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.et.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.fa.js +22 -0
- data/web/assets/javascripts/locales/jquery.timeago.fi.js +28 -0
- data/web/assets/javascripts/locales/jquery.timeago.fr-short.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.fr.js +17 -0
- data/web/assets/javascripts/locales/jquery.timeago.he.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.hr.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.hu.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.hy.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.id.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.it.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.ja.js +19 -0
- data/web/assets/javascripts/locales/jquery.timeago.ko.js +17 -0
- data/web/assets/javascripts/locales/jquery.timeago.lt.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.mk.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.nl.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.no.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.pl.js +31 -0
- data/web/assets/javascripts/locales/jquery.timeago.pt-br.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.pt.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.ro.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.rs.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.ru.js +34 -0
- data/web/assets/javascripts/locales/jquery.timeago.sk.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.sl.js +44 -0
- data/web/assets/javascripts/locales/jquery.timeago.sv.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.th.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.tr.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.uk.js +34 -0
- data/web/assets/javascripts/locales/jquery.timeago.uz.js +19 -0
- data/web/assets/javascripts/locales/jquery.timeago.zh-cn.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.zh-tw.js +20 -0
- data/web/assets/stylesheets/application.css +746 -0
- data/web/assets/stylesheets/bootstrap.css +9 -0
- data/web/locales/cs.yml +68 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +69 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +77 -0
- data/web/locales/es.yml +69 -0
- data/web/locales/fr.yml +69 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +69 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/no.yml +69 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +75 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +22 -0
- data/web/views/_job_info.erb +84 -0
- data/web/views/_nav.erb +66 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_js.erb +5 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +90 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +31 -0
- data/web/views/morgue.erb +71 -0
- data/web/views/queue.erb +45 -0
- data/web/views/queues.erb +27 -0
- data/web/views/retries.erb +74 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +54 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +404 -0
@@ -0,0 +1,149 @@
|
|
1
|
+
module Roundhouse
  # Middleware is code configured to run around message processing,
  # patterned after Rack middleware. Client middleware runs when jobs
  # are pushed onto the queue; server middleware runs when jobs are
  # actually processed.
  #
  # To add middleware for the client:
  #
  #   Roundhouse.configure_client do |config|
  #     config.client_middleware do |chain|
  #       chain.add MyClientHook
  #     end
  #   end
  #
  # To modify middleware for the server, just call with another block:
  #
  #   Roundhouse.configure_server do |config|
  #     config.server_middleware do |chain|
  #       chain.add MyServerHook
  #       chain.remove ActiveRecord
  #     end
  #   end
  #
  # Use insert_before / insert_after to control placement relative to
  # an existing entry:
  #
  #   chain.insert_before ActiveRecord, MyClientHook
  #   chain.insert_after ActiveRecord, MyClientHook
  #
  # A minimal server middleware:
  #
  #   class MyServerHook
  #     def call(worker_instance, msg, queue)
  #       puts "Before work"
  #       yield
  #       puts "After work"
  #     end
  #   end
  #
  # A minimal client middleware. Note the method must return the
  # result of the yield or the job will not push to Redis:
  #
  #   class MyClientHook
  #     def call(worker_class, msg, queue, redis_pool)
  #       puts "Before push"
  #       result = yield
  #       puts "After push"
  #       result
  #     end
  #   end
  #
  module Middleware
    # An ordered list of middleware registrations. Entries are stored
    # as (class, constructor args) pairs and instantiated fresh for
    # each traversal via #retrieve.
    class Chain
      include Enumerable

      # Raw list of Entry objects, in execution order.
      attr_reader :entries

      # dup/clone must not share the underlying entries array.
      def initialize_copy(copy)
        copy.instance_variable_set(:@entries, entries.dup)
      end

      # Enumerable support: yields each Entry in order.
      def each(&block)
        entries.each(&block)
      end

      def initialize
        @entries = []
        yield self if block_given?
      end

      # Drop every entry registered for +klass+.
      def remove(klass)
        entries.delete_if { |e| e.klass == klass }
      end

      # Append +klass+ to the chain; re-adding moves it to the end.
      def add(klass, *args)
        remove(klass) if exists?(klass)
        entries << Entry.new(klass, *args)
      end

      # Put +klass+ at the front of the chain.
      def prepend(klass, *args)
        remove(klass) if exists?(klass)
        entries.insert(0, Entry.new(klass, *args))
      end

      # Place +newklass+ immediately before +oldklass+ (or at the
      # front when +oldklass+ is not registered). An existing entry
      # for +newklass+ is moved rather than duplicated.
      def insert_before(oldklass, newklass, *args)
        idx = entries.index { |e| e.klass == newklass }
        entry = idx.nil? ? Entry.new(newklass, *args) : entries.delete_at(idx)
        target = entries.index { |e| e.klass == oldklass } || 0
        entries.insert(target, entry)
      end

      # Place +newklass+ immediately after +oldklass+ (or at the end
      # when +oldklass+ is not registered).
      def insert_after(oldklass, newklass, *args)
        idx = entries.index { |e| e.klass == newklass }
        entry = idx.nil? ? Entry.new(newklass, *args) : entries.delete_at(idx)
        target = entries.index { |e| e.klass == oldklass } || entries.count - 1
        entries.insert(target + 1, entry)
      end

      # True when +klass+ is registered anywhere in the chain.
      def exists?(klass)
        any? { |e| e.klass == klass }
      end

      # Fresh middleware instances, one per entry, in order.
      def retrieve
        map(&:make_new)
      end

      def clear
        entries.clear
      end

      # Run +args+ through the whole chain: each middleware's #call
      # receives the args plus a continuation block that advances to
      # the next middleware, ending at the caller's block.
      def invoke(*args)
        remaining = retrieve.dup
        traverse = lambda do
          if remaining.empty?
            yield
          else
            remaining.shift.call(*args, &traverse)
          end
        end
        traverse.call
      end
    end

    # One registered middleware: remembers the class plus the
    # arguments its constructor should receive.
    class Entry
      attr_reader :klass

      def initialize(klass, *args)
        @klass = klass
        @args = args
      end

      # Build a fresh instance for one chain traversal.
      def make_new
        @klass.new(*@args)
      end
    end
  end
end
|
@@ -0,0 +1,41 @@
|
|
1
|
+
#
# Simple middleware to save the current locale and restore it when the job executes.
# Use it by requiring it in your initializer:
#
#     require 'roundhouse/middleware/i18n'
#
module Roundhouse::Middleware::I18n
  # Client middleware: capture the locale current at push time and
  # stash it in the job payload (unless one was already set).
  class Client
    def call(worker_class, msg, queue, redis_pool)
      msg['locale'] ||= I18n.locale
      yield
    end
  end

  # Server middleware: apply the payload's locale for the duration of
  # the job.
  # NOTE(review): the ensure resets to I18n.default_locale, not to the
  # locale that was active before the job — confirm this is intended.
  class Server
    def call(worker, msg, queue)
      I18n.locale = msg['locale'] || I18n.default_locale
      yield
    ensure
      I18n.locale = I18n.default_locale
    end
  end
end

Roundhouse.configure_client do |config|
  config.client_middleware do |chain|
    chain.add Roundhouse::Middleware::I18n::Client
  end
end

Roundhouse.configure_server do |config|
  # A server process can also push new jobs, so it registers the
  # client middleware as well.
  config.client_middleware do |chain|
    chain.add Roundhouse::Middleware::I18n::Client
  end
  config.server_middleware do |chain|
    chain.add Roundhouse::Middleware::I18n::Server
  end
end
|
@@ -0,0 +1,40 @@
|
|
1
|
+
module Roundhouse
  module Middleware
    module Server
      # Logs the start, success ("done") and failure ("fail") of every
      # job, with elapsed wall-clock seconds, inside a logging context
      # that tags each line with the job's identity.
      class Logging

        def call(worker, item, queue)
          Roundhouse::Logging.with_context(log_context(worker, item)) do
            begin
              started_at = Time.now
              logger.info { "start" }
              yield
              logger.info { "done: #{elapsed(started_at)} sec" }
            rescue Exception
              # Log the failure timing, then let the exception continue
              # up to the retry/error-handling middleware.
              logger.info { "fail: #{elapsed(started_at)} sec" }
              raise
            end
          end
        end

        private

        # If we're using a wrapper class, like ActiveJob, use the "wrapped"
        # attribute to expose the underlying thing.
        def log_context(worker, item)
          job_class = item['wrapped'.freeze] || worker.class.to_s
          "#{job_class} JID-#{item['jid'.freeze]}#{" BID-#{item['bid'.freeze]}" if item['bid'.freeze]}"
        end

        # Seconds since +start+, rounded to milliseconds.
        def elapsed(start)
          (Time.now - start).round(3)
        end

        def logger
          Roundhouse.logger
        end
      end
    end
  end
end
|
40
|
+
|
@@ -0,0 +1,206 @@
|
|
1
|
+
require 'roundhouse/scheduled'
require 'roundhouse/api'

module Roundhouse
  module Middleware
    module Server
      ##
      # Automatically retry jobs that fail in Roundhouse.
      # Roundhouse's retry support assumes a typical development lifecycle:
      #
      #   0. push some code changes with a bug in it
      #   1. bug causes job processing to fail, roundhouse's middleware captures
      #      the job and pushes it onto a retry queue
      #   2. roundhouse retries jobs in the retry queue multiple times with
      #      an exponential delay, the job continues to fail
      #   3. after a few days, a developer deploys a fix. the job is
      #      reprocessed successfully.
      #   4. once retries are exhausted, roundhouse will give up and move the
      #      job to the Dead Job Queue (aka morgue) where it must be dealt with
      #      manually in the Web UI.
      #   5. After 6 months on the DJQ, Roundhouse will discard the job.
      #
      # A job looks like:
      #
      #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
      #
      # The 'retry' option also accepts a number (in place of 'true'):
      #
      #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
      #
      # The job will be retried this number of times before giving up. (If simply
      # 'true', Roundhouse retries 25 times)
      #
      # We'll add a bit more data to the job to support retries:
      #
      # * 'queue' - the queue to use
      # * 'retry_count' - number of times we've retried so far.
      # * 'error_message' - the message from the exception
      # * 'error_class' - the exception class
      # * 'failed_at' - the first time it failed
      # * 'retried_at' - the last time it was retried
      # * 'backtrace' - the number of lines of error backtrace to store
      #
      # We don't store the backtrace by default as that can add a lot of overhead
      # to the job and everyone is using an error service, right?
      #
      # The default number of retry attempts is 25 which works out to about 3 weeks
      # of retries. You can pass a value for the max number of retry attempts when
      # adding the middleware using the options hash:
      #
      #   Roundhouse.configure_server do |config|
      #     config.server_middleware do |chain|
      #       chain.add Roundhouse::Middleware::Server::RetryJobs, :max_retries => 7
      #     end
      #   end
      #
      # or limit the number of retries for a particular worker with:
      #
      #    class MyWorker
      #      include Roundhouse::Worker
      #      roundhouse_options :retry => 10
      #    end
      #
      class RetryJobs
        include Roundhouse::Util

        DEFAULT_MAX_RETRY_ATTEMPTS = 25

        def initialize(options = {})
          @max_retries = options.fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
        end

        def call(worker, msg, queue)
          yield
        rescue Roundhouse::Shutdown
          # ignore, will be pushed back onto queue during hard_shutdown
          raise
        rescue Exception => e
          # Shutdown may arrive wrapped as the cause of another
          # exception (Ruby 2.1+ exception causes); unwrap it so the
          # job is requeued by hard_shutdown instead of being retried.
          raise Roundhouse::Shutdown if exception_caused_by_shutdown?(e)

          raise e unless msg['retry']
          attempt_retry(worker, msg, queue, e)
        end

        private

        # Record failure metadata on the job, then either schedule it
        # in the 'retry' zset or, when attempts are exhausted, hand it
        # to the morgue. Always re-raises the original exception.
        def attempt_retry(worker, msg, queue, exception)
          max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)

          msg['queue'] = if msg['retry_queue']
            msg['retry_queue']
          else
            queue
          end

          # App code can stuff all sorts of crazy binary data into the error message
          # that won't convert to JSON.
          m = exception.message[0..10_000]
          if m.respond_to?(:scrub!)
            m.force_encoding("utf-8")
            m.scrub!
          end

          msg['error_message'] = m
          msg['error_class'] = exception.class.name
          count = if msg['retry_count']
            msg['retried_at'] = Time.now.to_f
            msg['retry_count'] += 1
          else
            msg['failed_at'] = Time.now.to_f
            msg['retry_count'] = 0
          end

          # 'backtrace' may be true (store all), falsy (store none) or
          # an integer (store that many lines).
          if msg['backtrace'] == true
            msg['error_backtrace'] = exception.backtrace
          elsif !msg['backtrace']
            # do nothing
          elsif msg['backtrace'].to_i != 0
            msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
          end

          if count < max_retry_attempts
            delay = delay_for(worker, count)
            logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
            retry_at = Time.now.to_f + delay
            payload = Roundhouse.dump_json(msg)
            Roundhouse.redis do |conn|
              conn.zadd('retry', retry_at.to_s, payload)
            end
          else
            # Goodbye dear message, you (re)tried your best I'm sure.
            retries_exhausted(worker, msg)
          end

          raise exception
        end

        def retries_exhausted(worker, msg)
          logger.debug { "Dropping message after hitting the retry maximum: #{msg}" }
          begin
            if worker.roundhouse_retries_exhausted_block?
              worker.roundhouse_retries_exhausted_block.call(msg)
            end
          rescue => e
            handle_exception(e, { context: "Error calling retries_exhausted for #{worker.class}", job: msg })
          end

          # A job may opt out of the morgue with 'dead' => false.
          send_to_morgue(msg) unless msg['dead'] == false
        end

        def send_to_morgue(msg)
          Roundhouse.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
          payload = Roundhouse.dump_json(msg)
          now = Time.now.to_f
          Roundhouse.redis do |conn|
            conn.multi do
              conn.zadd('dead', now, payload)
              # Trim the morgue by age and by size in the same MULTI.
              conn.zremrangebyscore('dead', '-inf', now - DeadSet.timeout)
              conn.zremrangebyrank('dead', 0, -DeadSet.max_jobs)
            end
          end
        end

        # 'retry' may be an integer cap or simply true/anything else.
        # Integer replaces the deprecated Fixnum (removed in Ruby 3.2),
        # keeping this working on modern Rubies.
        def retry_attempts_from(msg_retry, default)
          if msg_retry.is_a?(Integer)
            msg_retry
          else
            default
          end
        end

        def delay_for(worker, count)
          # A worker-supplied roundhouse_retry_in block wins; if it is
          # absent or fails (retry_in returns nil), fall back to the
          # default exponential schedule.
          worker.roundhouse_retry_in_block? && retry_in(worker, count) || seconds_to_delay(count)
        end

        # delayed_job uses the same basic formula
        def seconds_to_delay(count)
          (count ** 4) + 15 + (rand(30)*(count+1))
        end

        def retry_in(worker, count)
          begin
            worker.roundhouse_retry_in_block.call(count)
          rescue Exception => e
            handle_exception(e, { context: "Failure scheduling retry using the defined `roundhouse_retry_in` in #{worker.class.name}, falling back to default" })
            nil
          end
        end

        def exception_caused_by_shutdown?(e, checked_causes = [])
          # In Ruby 2.1.0 only, check if exception is a result of shutdown.
          return false unless defined?(e.cause)

          # Handle circular causes
          checked_causes << e.object_id
          return false if checked_causes.include?(e.cause.object_id)

          e.cause.instance_of?(Roundhouse::Shutdown) ||
            exception_caused_by_shutdown?(e.cause, checked_causes)
        end

      end
    end
  end
end
|
@@ -0,0 +1,124 @@
|
|
1
|
+
module Roundhouse
  # This class implements two things:
  # 1. A queueing semaphore - the fetcher can pop the next available
  #    exclusive right to something (such as API request with a given
  #    auth token)
  # 2. Track which access right is temporarily suspended
  class Monitor
    ACTIVE = 1
    EMPTY = 0
    SUSPENDED = -1

    # This helps catch problems with key names at runtime
    SEMAPHORE = 'semaphore'.freeze
    BUCKETS = 'buckets'.freeze
    QUEUE = 'queue'.freeze
    SCHEDULE = 'schedule'.freeze
    STATUS = 'status'.freeze

    class << self
      # Find the first active queue.
      # If nothing is in the rotation, then block.
      def pop(conn)
        loop do
          _, q_id = conn.brpop(SEMAPHORE)
          return q_id if queue_status(conn, q_id) == ACTIVE
        end
      end

      # Put a queue id back into the rotation — but only while it is
      # still ACTIVE (empty/suspended queues stay out).
      def push(conn, q_id)
        return unless queue_status(conn, q_id) == ACTIVE
        conn.lpush(SEMAPHORE, q_id)
      end

      # Bulk requeue (push from right). Usually done
      # via Client, when Roundhouse is terminating
      def requeue(conn, q_id, jobs)
        conn.rpush("#{QUEUE}:#{q_id}", jobs)
      end

      # Block until some queue yields a job; returns [queue_id, job].
      # Queues found empty are flagged EMPTY so they leave the
      # rotation until new work arrives.
      def await_next_job(conn)
        loop do
          queue_id = pop(conn)
          job = pop_job(conn, queue_id)
          return queue_id, job if job
          Roundhouse::Monitor.set_queue_is_empty(conn, queue_id)
        end
      end

      def pop_job(conn, q_id)
        conn.rpop("#{QUEUE}:#{q_id}")
      end

      # Push one or more job payloads. Jobs carrying an 'at' key are
      # scheduled instead of enqueued immediately.
      def push_job(conn, payloads)
        return schedule(conn, payloads) if payloads.first['at']

        q_id = payloads.first['queue_id']
        now = Time.now.to_f
        to_push = payloads.map do |entry|
          entry['enqueued_at'.freeze] = now
          Roundhouse.dump_json(entry)
        end
        conn.lpush("#{QUEUE}:#{q_id}", to_push)

        maybe_add_to_rotation(conn, q_id)
      end

      def set_queue_is_empty(conn, q_id)
        set_queue_status(conn, q_id, EMPTY)
      end

      def activate(conn, q_id)
        set_queue_status(conn, q_id, ACTIVE)
      end

      def suspend(conn, q_id)
        set_queue_status(conn, q_id, SUSPENDED)
      end

      # Re-activate a SUSPENDED queue and put it back in the rotation.
      def resume(conn, q_id)
        return unless queue_status(conn, q_id) == SUSPENDED
        set_queue_status(conn, q_id, ACTIVE)
        conn.lpush(SEMAPHORE, q_id)
      end

      # Current status for q_id; a missing hash field means EMPTY.
      # (Fixed: the original `hget(...).to_i || EMPTY` could never hit
      # the `||` arm because to_i never returns nil — the default must
      # be applied before coercion. nil.to_i happened to equal EMPTY,
      # so behavior is unchanged; the intent is now explicit.)
      def queue_status(conn, q_id)
        (conn.hget(status_bucket(q_id), q_id) || EMPTY).to_i
      end

      def maybe_add_to_rotation(conn, q_id)
        # NOTE: this really should be written in LUA to make
        # sure this is set to ACTIVE after pushing it into the
        # queuing semaphore. Otherwise, race conditions might
        # creep in giving this queue an unfair advantage.
        # See: https://github.com/resque/redis-namespace/blob/master/lib/redis/namespace.rb#L403-L413
        # See: https://www.redisgreen.net/blog/intro-to-lua-for-redis-programmers/
        return false unless queue_status(conn, q_id) == EMPTY
        activate(conn, q_id)
        conn.lpush(SEMAPHORE, q_id)
      end

      # Statuses are sharded into hash buckets of 1000 queue ids.
      def status_bucket(q_id)
        "#{STATUS}:#{bucket_num(q_id)}"
      end

      def bucket_num(q_id)
        q_id.to_i / 1000
      end

      private

      def schedule(conn, payloads)
        conn.zadd(SCHEDULE.freeze, payloads.map do |hash|
          at = hash.delete('at'.freeze).to_s
          [at, Roundhouse.dump_json(hash)]
        end )
      end

      # Record the status and remember which bucket it lives in.
      def set_queue_status(conn, q_id, status)
        conn.sadd(BUCKETS, bucket_num(q_id))
        conn.hset(status_bucket(q_id), q_id, status)
      end
    end
  end
end
|
@@ -0,0 +1,42 @@
|
|
1
|
+
module Roundhouse
  # Shared pagination helper: fetch one page of a Redis list or sorted
  # set.
  module Paginator

    # Returns [current_page, total_size, items] for the given key.
    # +pageidx+ is 1-based (values < 1 clamp to 1). For zsets, items
    # are [value, score] pairs and opts[:reverse] pages from the
    # high-score end. An unknown key type raises.
    def page(key, pageidx=1, page_size=25, opts=nil)
      current_page = pageidx.to_i < 1 ? 1 : pageidx.to_i
      zero_based = current_page - 1
      starting = zero_based * page_size
      ending = starting + page_size - 1

      Roundhouse.redis do |conn|
        type = conn.type(key)

        case type
        when 'zset'
          rev = opts && opts[:reverse]
          total_size, items = conn.multi do
            conn.zcard(key)
            if rev
              conn.zrevrange(key, starting, ending, :with_scores => true)
            else
              conn.zrange(key, starting, ending, :with_scores => true)
            end
          end
          [current_page, total_size, items]
        when 'list'
          total_size, items = conn.multi do
            conn.llen(key)
            conn.lrange(key, starting, ending)
          end
          [current_page, total_size, items]
        when 'none'
          [1, 0, []]
        else
          raise "can't page a #{type}"
        end
      end
    end

  end
end
|