roundhouse-x 0.1.0
- checksums.yaml +7 -0
- data/.gitignore +12 -0
- data/.travis.yml +16 -0
- data/3.0-Upgrade.md +70 -0
- data/Changes.md +1127 -0
- data/Gemfile +27 -0
- data/LICENSE +7 -0
- data/README.md +52 -0
- data/Rakefile +9 -0
- data/bin/roundhouse +19 -0
- data/bin/roundhousectl +93 -0
- data/lib/generators/roundhouse/templates/worker.rb.erb +9 -0
- data/lib/generators/roundhouse/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/roundhouse/templates/worker_test.rb.erb +8 -0
- data/lib/generators/roundhouse/worker_generator.rb +49 -0
- data/lib/roundhouse/actor.rb +39 -0
- data/lib/roundhouse/api.rb +859 -0
- data/lib/roundhouse/cli.rb +396 -0
- data/lib/roundhouse/client.rb +210 -0
- data/lib/roundhouse/core_ext.rb +105 -0
- data/lib/roundhouse/exception_handler.rb +30 -0
- data/lib/roundhouse/fetch.rb +154 -0
- data/lib/roundhouse/launcher.rb +98 -0
- data/lib/roundhouse/logging.rb +104 -0
- data/lib/roundhouse/manager.rb +236 -0
- data/lib/roundhouse/middleware/chain.rb +149 -0
- data/lib/roundhouse/middleware/i18n.rb +41 -0
- data/lib/roundhouse/middleware/server/active_record.rb +13 -0
- data/lib/roundhouse/middleware/server/logging.rb +40 -0
- data/lib/roundhouse/middleware/server/retry_jobs.rb +206 -0
- data/lib/roundhouse/monitor.rb +124 -0
- data/lib/roundhouse/paginator.rb +42 -0
- data/lib/roundhouse/processor.rb +159 -0
- data/lib/roundhouse/rails.rb +24 -0
- data/lib/roundhouse/redis_connection.rb +77 -0
- data/lib/roundhouse/scheduled.rb +115 -0
- data/lib/roundhouse/testing/inline.rb +28 -0
- data/lib/roundhouse/testing.rb +193 -0
- data/lib/roundhouse/util.rb +68 -0
- data/lib/roundhouse/version.rb +3 -0
- data/lib/roundhouse/web.rb +264 -0
- data/lib/roundhouse/web_helpers.rb +249 -0
- data/lib/roundhouse/worker.rb +90 -0
- data/lib/roundhouse.rb +177 -0
- data/roundhouse.gemspec +27 -0
- data/test/config.yml +9 -0
- data/test/env_based_config.yml +11 -0
- data/test/fake_env.rb +0 -0
- data/test/fixtures/en.yml +2 -0
- data/test/helper.rb +49 -0
- data/test/test_api.rb +521 -0
- data/test/test_cli.rb +389 -0
- data/test/test_client.rb +294 -0
- data/test/test_exception_handler.rb +55 -0
- data/test/test_fetch.rb +206 -0
- data/test/test_logging.rb +34 -0
- data/test/test_manager.rb +169 -0
- data/test/test_middleware.rb +160 -0
- data/test/test_monitor.rb +258 -0
- data/test/test_processor.rb +176 -0
- data/test/test_rails.rb +23 -0
- data/test/test_redis_connection.rb +127 -0
- data/test/test_retry.rb +390 -0
- data/test/test_roundhouse.rb +87 -0
- data/test/test_scheduled.rb +120 -0
- data/test/test_scheduling.rb +75 -0
- data/test/test_testing.rb +78 -0
- data/test/test_testing_fake.rb +240 -0
- data/test/test_testing_inline.rb +65 -0
- data/test/test_util.rb +18 -0
- data/test/test_web.rb +605 -0
- data/test/test_web_helpers.rb +52 -0
- data/web/assets/images/bootstrap/glyphicons-halflings-white.png +0 -0
- data/web/assets/images/bootstrap/glyphicons-halflings.png +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status/active.png +0 -0
- data/web/assets/images/status/idle.png +0 -0
- data/web/assets/images/status-sd8051fd480.png +0 -0
- data/web/assets/javascripts/application.js +83 -0
- data/web/assets/javascripts/dashboard.js +300 -0
- data/web/assets/javascripts/locales/README.md +27 -0
- data/web/assets/javascripts/locales/jquery.timeago.ar.js +96 -0
- data/web/assets/javascripts/locales/jquery.timeago.bg.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.bs.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.ca.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.cs.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.cy.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.da.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.de.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.el.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.en-short.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.en.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.es.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.et.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.fa.js +22 -0
- data/web/assets/javascripts/locales/jquery.timeago.fi.js +28 -0
- data/web/assets/javascripts/locales/jquery.timeago.fr-short.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.fr.js +17 -0
- data/web/assets/javascripts/locales/jquery.timeago.he.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.hr.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.hu.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.hy.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.id.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.it.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.ja.js +19 -0
- data/web/assets/javascripts/locales/jquery.timeago.ko.js +17 -0
- data/web/assets/javascripts/locales/jquery.timeago.lt.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.mk.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.nl.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.no.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.pl.js +31 -0
- data/web/assets/javascripts/locales/jquery.timeago.pt-br.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.pt.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.ro.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.rs.js +49 -0
- data/web/assets/javascripts/locales/jquery.timeago.ru.js +34 -0
- data/web/assets/javascripts/locales/jquery.timeago.sk.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.sl.js +44 -0
- data/web/assets/javascripts/locales/jquery.timeago.sv.js +18 -0
- data/web/assets/javascripts/locales/jquery.timeago.th.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.tr.js +16 -0
- data/web/assets/javascripts/locales/jquery.timeago.uk.js +34 -0
- data/web/assets/javascripts/locales/jquery.timeago.uz.js +19 -0
- data/web/assets/javascripts/locales/jquery.timeago.zh-cn.js +20 -0
- data/web/assets/javascripts/locales/jquery.timeago.zh-tw.js +20 -0
- data/web/assets/stylesheets/application.css +746 -0
- data/web/assets/stylesheets/bootstrap.css +9 -0
- data/web/locales/cs.yml +68 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +69 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +77 -0
- data/web/locales/es.yml +69 -0
- data/web/locales/fr.yml +69 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +69 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/no.yml +69 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +75 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +22 -0
- data/web/views/_job_info.erb +84 -0
- data/web/views/_nav.erb +66 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_js.erb +5 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +90 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +31 -0
- data/web/views/morgue.erb +71 -0
- data/web/views/queue.erb +45 -0
- data/web/views/queues.erb +27 -0
- data/web/views/retries.erb +74 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +54 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +404 -0
data/lib/roundhouse/api.rb (@@ -0,0 +1,859 @@)

```ruby
# encoding: utf-8
require 'roundhouse'

module Roundhouse
  class Stats
    def initialize
      fetch_stats!
    end

    def in_rotation; stat :in_rotation end
    def num_queues; stat :num_queues end
    def num_empty_queues; stat :num_empty_queues end
    def num_suspended_queues; stat :num_suspended_queues end
    def avg_queue_len; stat :avg_queue_len end

    def processed
      stat :processed
    end

    def failed
      stat :failed
    end

    def scheduled_size
      stat :scheduled_size
    end

    def retry_size
      stat :retry_size
    end

    def dead_size
      stat :dead_size
    end

    def enqueued
      stat :enqueued
    end

    def processes_size
      stat :processes_size
    end

    def workers_size
      stat :workers_size
    end

    def default_queue_latency
      stat :default_queue_latency
    end

    def queues
      Roundhouse::Stats::Queues.new.lengths
    end

    def fetch_stats!
      pipe1_res = Roundhouse.redis do |conn|
        conn.pipelined do
          conn.get('stat:processed'.freeze)
          conn.get('stat:failed'.freeze)
          conn.zcard('schedule'.freeze)
          conn.zcard('retry'.freeze)
          conn.zcard('dead'.freeze)
          conn.scard('processes'.freeze)
          conn.llen('semaphore'.freeze)
          conn.smembers('processes'.freeze)
          conn.smembers(Roundhouse::Monitor::BUCKETS)
        end
      end

      queues_res = Roundhouse.redis do |conn|
        conn.pipelined do
          pipe1_res[8].each { |bucket| conn.hgetall("#{Roundhouse::Monitor::STATUS}:#{bucket}") }
        end
      end

      all_queue_ids = queues_res.map(&:keys).flatten

      pipe2_res = Roundhouse.redis do |conn|
        conn.pipelined do
          pipe1_res[7].each { |key| conn.hget(key, 'busy'.freeze) }
          all_queue_ids.each { |queue| conn.llen("queue:#{queue}") }
        end
      end

      s = pipe1_res[7].size
      workers_size = pipe2_res[0...s].map(&:to_i).inject(0, &:+)
      enqueued = pipe2_res[s..-1].map(&:to_i).inject(0, &:+)

      # Calculate queue status
      all_queue_count = all_queue_ids.size
      empty_queues = 0
      suspended_queues = 0
      queues_res.each do |h|
        h.each do |_, v|
          case v.to_i
          when Roundhouse::Monitor::EMPTY then empty_queues += 1
          when Roundhouse::Monitor::SUSPENDED then suspended_queues += 1
          end
        end
      end

      avg_queue_len = (all_queue_count == 0 ? nil : enqueued / all_queue_count)

      # default_queue_latency = if (entry = pipe1_res[6].first)
      #   Time.now.to_f - Roundhouse.load_json(entry)['enqueued_at'.freeze]
      # else
      #   0
      # end

      @stats = {
        in_rotation: pipe1_res[6].to_i,
        processed: pipe1_res[0].to_i,
        failed: pipe1_res[1].to_i,
        scheduled_size: pipe1_res[2],
        retry_size: pipe1_res[3],
        dead_size: pipe1_res[4],
        processes_size: pipe1_res[5],

        # default_queue_latency: default_queue_latency,
        workers_size: workers_size,
        enqueued: enqueued,
        num_queues: all_queue_count,
        num_empty_queues: empty_queues,
        num_suspended_queues: suspended_queues,
        avg_queue_len: avg_queue_len
      }
    end

    def reset(*stats)
      all = %w(failed processed)
      stats = stats.empty? ? all : all & stats.flatten.compact.map(&:to_s)

      mset_args = []
      stats.each do |stat|
        mset_args << "stat:#{stat}"
        mset_args << 0
      end
      Roundhouse.redis do |conn|
        conn.mset(*mset_args)
      end
    end

    private

    def stat(s)
      @stats[s]
    end

    class Queues
      def lengths
        Roundhouse.redis do |conn|
          # Refactor note: this set contains the bucket names, not
          # the queue names
          queues = conn.smembers('queues'.freeze)

          lengths = conn.pipelined do
            queues.each do |queue|
              conn.llen("queue:#{queue}")
            end
          end

          i = 0
          array_of_arrays = queues.inject({}) do |memo, queue|
            memo[queue] = lengths[i]
            i += 1
            memo
          end.sort_by { |_, size| size }

          Hash[array_of_arrays.reverse]
        end
      end
    end

    class History
      def initialize(days_previous, start_date = nil)
        @days_previous = days_previous
        @start_date = start_date || Time.now.utc.to_date
      end

      def processed
        date_stat_hash("processed")
      end

      def failed
        date_stat_hash("failed")
      end

      private

      def date_stat_hash(stat)
        i = 0
        stat_hash = {}
        keys = []
        dates = []

        while i < @days_previous
          date = @start_date - i
          datestr = date.strftime("%Y-%m-%d".freeze)
          keys << "stat:#{stat}:#{datestr}"
          dates << datestr
          i += 1
        end

        Roundhouse.redis do |conn|
          conn.mget(keys).each_with_index do |value, idx|
            stat_hash[dates[idx]] = value ? value.to_i : 0
          end
        end

        stat_hash
      end
    end
  end
```
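A minimal usage sketch for the counters above, assuming a configured Redis connection and that `roundhouse/api` is loadable from the lib path shown in the file list; the day count and return values are illustrative:

```ruby
require 'roundhouse/api'

stats = Roundhouse::Stats.new   # runs fetch_stats! once over three pipelined round-trips
stats.processed                 # lifetime processed count, from 'stat:processed'
stats.enqueued                  # sum of LLEN across every known queue
stats.avg_queue_len             # nil when no queues exist, otherwise integer division

# Per-day counters for the last 7 days, keyed by "YYYY-MM-DD" strings:
Roundhouse::Stats::History.new(7).processed  # e.g. { "2015-09-01" => 120, ... }
```

Note that `Stats` snapshots its numbers in the constructor; call `fetch_stats!` again for fresh values.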
```ruby
  ##
  # Encapsulates a queue within Roundhouse.
  # Allows enumeration of all jobs within the queue
  # and deletion of jobs.
  #
  #   queue = Roundhouse::Queue.new("mailer")
  #   queue.each do |job|
  #     job.klass # => 'MyWorker'
  #     job.args # => [1, 2, 3]
  #     job.delete if job.jid == 'abcdef1234567890'
  #   end
  #
  class Queue
    include Enumerable

    def self.all
      Roundhouse.redis do |c|
        c.smembers(Roundhouse::Monitor::BUCKETS).map { |bucket_num| c.hkeys("#{Roundhouse::Monitor::STATUS}:#{bucket_num}") }
      end.flatten.sort.map { |q| Roundhouse::Queue.new(q) }
    end

    attr_reader :queue_id

    def initialize(queue_id)
      @queue_id = queue_id.to_i
      @rname = "queue:#{queue_id}"
    end

    def status
      case Roundhouse.redis { |conn| Roundhouse::Monitor.queue_status(conn, queue_id) }
      when Roundhouse::Monitor::ACTIVE then :active
      when Roundhouse::Monitor::EMPTY then :empty
      when Roundhouse::Monitor::SUSPENDED then :suspended
      else :unknown
      end
    end

    def bucket
      Roundhouse::Monitor.status_bucket(queue_id)
    end

    def size
      Roundhouse.redis { |con| con.llen(@rname) }
    end

    # Roundhouse Pro overrides this
    def paused?
      false
    end

    def latency
      entry = Roundhouse.redis do |conn|
        conn.lrange(@rname, -1, -1)
      end.first
      return 0 unless entry
      Time.now.to_f - Roundhouse.load_json(entry)['enqueued_at']
    end

    def each
      initial_size = size
      deleted_size = 0
      page = 0
      page_size = 50

      loop do
        range_start = page * page_size - deleted_size
        range_end = page * page_size - deleted_size + (page_size - 1)
        entries = Roundhouse.redis do |conn|
          conn.lrange @rname, range_start, range_end
        end
        break if entries.empty?
        page += 1
        entries.each do |entry|
          yield Job.new(entry, @queue_id)
        end
        deleted_size = initial_size - size
      end
    end

    def find_job(jid)
      detect { |j| j.jid == jid }
    end

    def clear
      Roundhouse.redis do |conn|
        conn.multi do
          conn.del(@rname)
          conn.hdel(bucket, queue_id)
        end
      end
    end
    alias_method :💣, :clear
  end
```
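Queues in Roundhouse are addressed by integer id rather than by name (note the `to_i` in the constructor), so queue-level operations look like the following sketch; the id `42` and the jid are made up:

```ruby
Roundhouse::Queue.all.each do |q|
  puts "queue #{q.queue_id}: #{q.size} jobs, status=#{q.status}, latency=#{q.latency.round(2)}s"
end

queue = Roundhouse::Queue.new(42)
job = queue.find_job('abcdef1234567890')  # linear scan via #each
queue.clear                               # drops the list and its status-bucket entry
```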
```ruby
  ##
  # Encapsulates a pending job within a Roundhouse queue or
  # sorted set.
  #
  # The job should be considered immutable but may be
  # removed from the queue via Job#delete.
  #
  class Job
    attr_reader :item

    def initialize(item, queue_name = nil)
      @value = item
      @item = item.is_a?(Hash) ? item : Roundhouse.load_json(item)
      @queue_id = queue_name || @item['queue_id']
    end

    def klass
      @item['class']
    end

    def display_class
      # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
      @klass ||= case klass
                 when /\ARoundhouse::Extensions::Delayed/
                   safe_load(args[0], klass) do |target, method, _|
                     "#{target}.#{method}"
                   end
                 when "ActiveJob::QueueAdapters::RoundhouseAdapter::JobWrapper"
                   @item['wrapped'] || args[0]
                 else
                   klass
                 end
    end

    def display_args
      # Unwrap known wrappers so they show up in a human-friendly manner in the Web UI
      @args ||= case klass
                when /\ARoundhouse::Extensions::Delayed/
                  safe_load(args[0], args) do |_, _, arg|
                    arg
                  end
                when "ActiveJob::QueueAdapters::RoundhouseAdapter::JobWrapper"
                  @item['wrapped'] ? args[0]["arguments"] : []
                else
                  args
                end
    end

    def args
      @item['args']
    end

    def jid
      @item['jid']
    end

    def enqueued_at
      @item['enqueued_at'] ? Time.at(@item['enqueued_at']).utc : nil
    end

    def created_at
      Time.at(@item['created_at'] || @item['enqueued_at'] || 0).utc
    end

    def queue_id
      @queue_id
    end

    def latency
      Time.now.to_f - (@item['enqueued_at'] || @item['created_at'])
    end

    ##
    # Remove this job from the queue.
    def delete
      count = Roundhouse.redis do |conn|
        conn.lrem("#{Roundhouse::Monitor::QUEUE}:#{@queue_id}", 1, @value)
      end
      count != 0
    end

    def [](name)
      @item.__send__(:[], name)
    end

    private

    def safe_load(content, default)
      begin
        yield(*YAML.load(content))
      rescue => ex
        # #1761 in dev mode, it's possible to have jobs enqueued which haven't been loaded into
        # memory yet so the YAML can't be loaded.
        Roundhouse.logger.warn "Unable to load YAML: #{ex.message}" unless Roundhouse.options[:environment] == 'development'
        default
      end
    end
  end
```
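For instance, wrapping a raw payload in `Job` to inspect it; the hash below is illustrative, with field names matching what the accessors above read:

```ruby
payload = { 'class' => 'HardWorker', 'args' => [1, 2], 'jid' => 'abcdef1234567890',
            'queue_id' => 42, 'enqueued_at' => Time.now.to_f }
job = Roundhouse::Job.new(Roundhouse.dump_json(payload))
job.display_class  # => "HardWorker" (Delayed/ActiveJob wrappers get unwrapped)
job.latency        # => seconds since enqueued_at
job['queue_id']    # => 42, via the [] passthrough to the item hash
```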
```ruby
  class SortedEntry < Job
    attr_reader :score
    attr_reader :parent

    def initialize(parent, score, item)
      super(item)
      @score = score
      @parent = parent
    end

    def at
      Time.at(score).utc
    end

    def delete
      if @value
        @parent.delete_by_value(@parent.name, @value)
      else
        @parent.delete_by_jid(score, jid)
      end
    end

    def reschedule(at)
      delete
      @parent.schedule(at, item)
    end

    def add_to_queue
      remove_job do |message|
        msg = Roundhouse.load_json(message)
        Roundhouse::Client.push(msg)
      end
    end

    def retry
      raise "Retry not available on jobs which have not failed" unless item["failed_at"]
      remove_job do |message|
        msg = Roundhouse.load_json(message)
        msg['retry_count'] -= 1
        Roundhouse::Client.push(msg)
      end
    end

    ##
    # Place job in the dead set
    def kill
      raise 'Kill not available on jobs which have not failed' unless item['failed_at']
      remove_job do |message|
        Roundhouse.logger.info { "Killing job #{message['jid']}" }
        now = Time.now.to_f
        Roundhouse.redis do |conn|
          conn.multi do
            conn.zadd('dead', now, message)
            conn.zremrangebyscore('dead', '-inf', now - DeadSet.timeout)
            conn.zremrangebyrank('dead', 0, -DeadSet.max_jobs)
          end
        end
      end
    end

    private

    def remove_job
      Roundhouse.redis do |conn|
        results = conn.multi do
          conn.zrangebyscore(parent.name, score, score)
          conn.zremrangebyscore(parent.name, score, score)
        end.first

        if results.size == 1
          yield results.first
        else
          # multiple jobs with the same score
          # find the one with the right JID and push it
          hash = results.group_by do |message|
            if message.index(jid)
              msg = Roundhouse.load_json(message)
              msg['jid'] == jid
            else
              false
            end
          end

          msg = hash.fetch(true, []).first
          yield msg if msg

          # push the rest back onto the sorted set
          conn.multi do
            hash.fetch(false, []).each do |message|
              conn.zadd(parent.name, score.to_f.to_s, message)
            end
          end
        end
      end
    end
  end
```
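A sketch of what `SortedEntry` allows on a single retry entry, located by jid via `RetrySet` (defined further down); the jid is made up:

```ruby
entry = Roundhouse::RetrySet.new.find_job('abcdef1234567890')
if entry
  entry.reschedule(Time.now.to_f + 3600)  # delete, then re-add one hour out
  # entry.add_to_queue                    # or push it straight back onto its queue
  # entry.kill                            # or move it to the dead set, trimmed by
                                          # DeadSet.timeout / DeadSet.max_jobs
end
```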
```ruby
  class SortedSet
    include Enumerable

    attr_reader :name

    def initialize(name)
      @name = name
      @_size = size
    end

    def size
      Roundhouse.redis { |c| c.zcard(name) }
    end

    def clear
      Roundhouse.redis do |conn|
        conn.del(name)
      end
    end
    alias_method :💣, :clear
  end
```
```ruby
  class JobSet < SortedSet

    def schedule(timestamp, message)
      Roundhouse.redis do |conn|
        conn.zadd(name, timestamp.to_f.to_s, Roundhouse.dump_json(message))
      end
    end

    def each
      initial_size = @_size
      offset_size = 0
      page = -1
      page_size = 50

      loop do
        range_start = page * page_size + offset_size
        range_end = page * page_size + offset_size + (page_size - 1)
        elements = Roundhouse.redis do |conn|
          conn.zrange name, range_start, range_end, with_scores: true
        end
        break if elements.empty?
        page -= 1
        elements.each do |element, score|
          yield SortedEntry.new(self, score, element)
        end
        offset_size = initial_size - @_size
      end
    end

    def fetch(score, jid = nil)
      elements = Roundhouse.redis do |conn|
        conn.zrangebyscore(name, score, score)
      end

      elements.inject([]) do |result, element|
        entry = SortedEntry.new(self, score, element)
        if jid
          result << entry if entry.jid == jid
        else
          result << entry
        end
        result
      end
    end

    def find_job(jid)
      self.detect { |j| j.jid == jid }
    end

    def delete_by_value(name, value)
      Roundhouse.redis do |conn|
        ret = conn.zrem(name, value)
        @_size -= 1 if ret
        ret
      end
    end

    def delete_by_jid(score, jid)
      Roundhouse.redis do |conn|
        elements = conn.zrangebyscore(name, score, score)
        elements.each do |element|
          message = Roundhouse.load_json(element)
          if message["jid"] == jid
            ret = conn.zrem(name, element)
            @_size -= 1 if ret
            break ret
          end
          false
        end
      end
    end

    alias_method :delete, :delete_by_jid
  end
```
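The `JobSet` primitives can also be used directly when the three concrete sets below don't fit; a sketch, with the timestamp and jid chosen arbitrarily:

```ruby
set = Roundhouse::JobSet.new('schedule')  # the same set ScheduledSet wraps
at  = Time.now.to_f + 300
set.schedule(at, 'class' => 'HardWorker', 'args' => [], 'jid' => 'ffffffffffffffff')
set.fetch(at).map(&:jid)                  # every entry at exactly that score
set.delete_by_jid(at, 'ffffffffffffffff') # remove only the matching member
```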
```ruby
  ##
  # Allows enumeration of scheduled jobs within Roundhouse.
  # Based on this, you can search/filter for jobs. Here's an
  # example where I'm selecting all jobs of a certain type
  # and deleting them from the schedule.
  #
  #   r = Roundhouse::ScheduledSet.new
  #   r.select do |retri|
  #     retri.klass == 'Roundhouse::Extensions::DelayedClass' &&
  #     retri.args[0] == 'User' &&
  #     retri.args[1] == 'setup_new_subscriber'
  #   end.map(&:delete)
  class ScheduledSet < JobSet
    def initialize
      super 'schedule'
    end
  end
```
```ruby
  ##
  # Allows enumeration of retries within Roundhouse.
  # Based on this, you can search/filter for jobs. Here's an
  # example where I'm selecting all jobs of a certain type
  # and deleting them from the retry queue.
  #
  #   r = Roundhouse::RetrySet.new
  #   r.select do |retri|
  #     retri.klass == 'Roundhouse::Extensions::DelayedClass' &&
  #     retri.args[0] == 'User' &&
  #     retri.args[1] == 'setup_new_subscriber'
  #   end.map(&:delete)
  class RetrySet < JobSet
    def initialize
      super 'retry'
    end

    def retry_all
      while size > 0
        each(&:retry)
      end
    end
  end
```
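`retry_all` loops until the set drains because each `#retry` removes members while `#each` is still paginating; a sketch, with the counts invented:

```ruby
rs = Roundhouse::RetrySet.new
rs.size       # => 3, say
rs.retry_all  # re-enqueues every entry via Roundhouse::Client.push
rs.size       # => 0
```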
```ruby
  ##
  # Allows enumeration of dead jobs within Roundhouse.
  #
  class DeadSet < JobSet
    def initialize
      super 'dead'
    end

    def retry_all
      while size > 0
        each(&:retry)
      end
    end

    def self.max_jobs
      Roundhouse.options[:dead_max_jobs]
    end

    def self.timeout
      Roundhouse.options[:dead_timeout_in_seconds]
    end
  end
```
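`max_jobs` and `timeout` read from `Roundhouse.options`, so the dead set's bounds are fixed at configuration time. A sketch with illustrative values, assuming `Roundhouse.options` is a plain mutable hash:

```ruby
Roundhouse.options[:dead_max_jobs] = 10_000                        # rank cap applied in SortedEntry#kill
Roundhouse.options[:dead_timeout_in_seconds] = 180 * 24 * 60 * 60  # prune entries older than ~6 months
Roundhouse::DeadSet.new.retry_all                                  # give every dead job another chance
```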
```ruby
  ##
  # Enumerates the set of Roundhouse processes which are actively working
  # right now. Each process sends a heartbeat to Redis every 5 seconds
  # so this set should be relatively accurate, barring network partitions.
  #
  # Yields a Roundhouse::Process.
  #

  class ProcessSet
    include Enumerable

    def initialize(clean_plz = true)
      self.class.cleanup if clean_plz
    end

    # Cleans up dead processes recorded in Redis.
    # Returns the number of processes cleaned.
    def self.cleanup
      count = 0
      Roundhouse.redis do |conn|
        procs = conn.smembers('processes').sort
        heartbeats = conn.pipelined do
          procs.each do |key|
            conn.hget(key, 'info')
          end
        end

        # the hash named key has an expiry of 60 seconds.
        # if it's not found, that means the process has not reported
        # in to Redis and probably died.
        to_prune = []
        heartbeats.each_with_index do |beat, i|
          to_prune << procs[i] if beat.nil?
        end
        count = conn.srem('processes', to_prune) unless to_prune.empty?
      end
      count
    end

    def each
      procs = Roundhouse.redis { |conn| conn.smembers('processes') }.sort

      Roundhouse.redis do |conn|
        # We're making a tradeoff here between consuming more memory instead of
        # making more roundtrips to Redis, but if you have hundreds or thousands of workers,
        # you'll be happier this way
        result = conn.pipelined do
          procs.each do |key|
            conn.hmget(key, 'info', 'busy', 'beat')
          end
        end

        result.each do |info, busy, at_s|
          hash = Roundhouse.load_json(info)
          yield Process.new(hash.merge('busy' => busy.to_i, 'beat' => at_s.to_f))
        end
      end

      nil
    end

    # This method is not guaranteed accurate since it does not prune the set
    # based on current heartbeat. #each does that and ensures the set only
    # contains Roundhouse processes which have sent a heartbeat within the last
    # 60 seconds.
    def size
      Roundhouse.redis { |conn| conn.scard('processes') }
    end
  end
```
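Enumerating live processes is then a one-liner; a sketch (the constructor prunes stale heartbeats first via `cleanup`):

```ruby
Roundhouse::ProcessSet.new.each do |process|
  puts "#{process['identity']}: busy=#{process['busy']} beat=#{Time.at(process['beat'])}"
end
```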
```ruby
  #
  # Roundhouse::Process has a set of attributes which look like this:
  #
  # {
  #   'hostname' => 'app-1.example.com',
  #   'started_at' => <process start time>,
  #   'pid' => 12345,
  #   'tag' => 'myapp',
  #   'concurrency' => 25,
  #   'queues' => ['default', 'low'],
  #   'busy' => 10,
  #   'beat' => <last heartbeat>,
  #   'identity' => <unique string identifying the process>,
  # }
  class Process
    def initialize(hash)
      @attribs = hash
    end

    def tag
      self['tag']
    end

    def labels
      Array(self['labels'])
    end

    def [](key)
      @attribs[key]
    end

    def quiet!
      signal('USR1')
    end

    def stop!
      signal('TERM')
    end

    def dump_threads
      signal('TTIN')
    end

    private

    def signal(sig)
      key = "#{identity}-signals"
      Roundhouse.redis do |c|
        c.multi do
          c.lpush(key, sig)
          c.expire(key, 60)
        end
      end
    end

    def identity
      self['identity']
    end
  end
```
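Signals travel through Redis rather than POSIX `kill`, so a process can be quieted or stopped from any machine that shares the Redis instance; each Roundhouse process pops its own `<identity>-signals` list. A sketch:

```ruby
Roundhouse::ProcessSet.new.each do |process|
  process.quiet!  # LPUSH 'USR1': stop fetching new work
  # process.stop! # LPUSH 'TERM': shut down
end
```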
```ruby
  ##
  # Programmatic access to the current active worker set.
  #
  # WARNING WARNING WARNING
  #
  # This is live data that can change every millisecond.
  # If you call #size => 5 and then expect #each to be
  # called 5 times, you're going to have a bad time.
  #
  #   workers = Roundhouse::Workers.new
  #   workers.size => 2
  #   workers.each do |process_id, thread_id, work|
  #     # process_id is a unique identifier per Roundhouse process
  #     # thread_id is a unique identifier per thread
  #     # work is a Hash which looks like:
  #     # { 'queue' => name, 'run_at' => timestamp, 'payload' => msg }
  #     # run_at is an epoch Integer.
  #   end
  #
  class Workers
    include Enumerable

    def each
      Roundhouse.redis do |conn|
        procs = conn.smembers('processes')
        procs.sort.each do |key|
          valid, workers = conn.pipelined do
            conn.exists(key)
            conn.hgetall("#{key}:workers")
          end
          next unless valid
          workers.each_pair do |tid, json|
            yield key, tid, Roundhouse.load_json(json)
          end
        end
      end
    end

    # Note that #size is only as accurate as Roundhouse's heartbeat,
    # which happens every 5 seconds. It is NOT real-time.
    #
    # Not very efficient if you have lots of Roundhouse
    # processes but the alternative is a global counter
    # which can easily get out of sync with crashy processes.
    def size
      Roundhouse.redis do |conn|
        procs = conn.smembers('processes')
        if procs.empty?
          0
        else
          conn.pipelined do
            procs.each do |key|
              conn.hget(key, 'busy')
            end
          end.map(&:to_i).inject(:+)
        end
      end
    end
  end

end
```
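As a closing sketch, flagging long-running work from the triplets `Workers#each` yields; the 60-second threshold is arbitrary and the payload is assumed to be the parsed job hash described in the comment above:

```ruby
Roundhouse::Workers.new.each do |process_id, thread_id, work|
  runtime = Time.now.to_i - work['run_at']
  if runtime > 60
    puts "#{process_id}/#{thread_id}: #{work['payload']['class']} running for #{runtime}s"
  end
end
```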