kthxbye 1.0.0
- data/.document +5 -0
- data/.gitignore +33 -0
- data/DESIGN.textile +81 -0
- data/Gemfile +21 -0
- data/Gemfile.lock +42 -0
- data/LICENSE +20 -0
- data/README.textile +91 -0
- data/Rakefile +53 -0
- data/VERSION +1 -0
- data/config.ru +7 -0
- data/lib/kthxbye.rb +151 -0
- data/lib/kthxbye/config.rb +35 -0
- data/lib/kthxbye/exceptions.rb +4 -0
- data/lib/kthxbye/failure.rb +62 -0
- data/lib/kthxbye/helper.rb +42 -0
- data/lib/kthxbye/job.rb +127 -0
- data/lib/kthxbye/version.rb +5 -0
- data/lib/kthxbye/web_interface.rb +117 -0
- data/lib/kthxbye/web_interface/public/application.js +16 -0
- data/lib/kthxbye/web_interface/public/awesome-buttons.css +108 -0
- data/lib/kthxbye/web_interface/public/jquery.js +154 -0
- data/lib/kthxbye/web_interface/public/style.css +128 -0
- data/lib/kthxbye/web_interface/views/error.haml +5 -0
- data/lib/kthxbye/web_interface/views/failed.haml +26 -0
- data/lib/kthxbye/web_interface/views/hash.haml +6 -0
- data/lib/kthxbye/web_interface/views/layout.haml +33 -0
- data/lib/kthxbye/web_interface/views/overview.haml +2 -0
- data/lib/kthxbye/web_interface/views/queues.haml +31 -0
- data/lib/kthxbye/web_interface/views/set.haml +4 -0
- data/lib/kthxbye/web_interface/views/stats.haml +32 -0
- data/lib/kthxbye/web_interface/views/view_backtrace.haml +8 -0
- data/lib/kthxbye/web_interface/views/workers.haml +24 -0
- data/lib/kthxbye/web_interface/views/working.haml +19 -0
- data/lib/kthxbye/worker.rb +221 -0
- data/test/helper.rb +18 -0
- data/test/redis-test.conf +115 -0
- data/test/test_failure.rb +51 -0
- data/test/test_helper.rb +86 -0
- data/test/test_kthxbye.rb +213 -0
- data/test/test_worker.rb +148 -0
- metadata +364 -0
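For orientation before the diffs: the gem is a small Redis-backed job queue in the Resque mold (the worker code below credits Resque for its signal handling). A minimal usage sketch, pieced together from the test suite further down; the ResizeImage class, the "images" queue, and the arguments are made up for illustration, while the calls themselves (Kthxbye.enqueue, Kthxbye::Worker.new(queue, sleep_for).run, a job class exposing self.perform) are as exercised by the tests:

    require 'kthxbye'

    # Any class with a .perform class method can act as a job; its return
    # value is the job's result. This class is invented for the example.
    class ResizeImage
      def self.perform(path, width)
        "resized #{path} to #{width}px"
      end
    end

    # Enqueue onto the "images" queue; Kthxbye.enqueue returns the job id.
    job_id = Kthxbye.enqueue("images", ResizeImage, "cat.png", 200)

    # A worker polls the given queue(s). The second argument is the sleep
    # interval between polls; 0 makes run return once the queue is empty,
    # which is how the tests drive it synchronously.
    Kthxbye::Worker.new("images", 0).run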
data/lib/kthxbye/web_interface/views/working.haml
ADDED
@@ -0,0 +1,19 @@
+%h2 Active Workers
+
+- unless Kthxbye.working.empty?
+  %table
+    %th Worker
+    %th Job
+    %th Time Started
+    - Kthxbye.working.each do |worker,job|
+      %tr
+        %td #{worker}
+        %td #{job['job_id']}
+        %td #{job['started']}
+- else
+  %span{:style => "color:red"}
+    Currently no active workers.
+
+%p
+  = toggle_poll
+
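The view above renders whatever `Kthxbye.working` reports: worker-id/job pairs where each job hash carries the 'job_id' and 'started' keys written by `Worker#working` in worker.rb below. A rough sketch of reading the same data outside the web UI:

    # Sketch only: print the same rows the Active Workers table shows.
    Kthxbye.working.each do |worker, job|
      puts "#{worker} is running job #{job['job_id']} (started #{job['started']})"
    end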
data/lib/kthxbye/worker.rb
ADDED
@@ -0,0 +1,221 @@
+module Kthxbye
+  class Worker
+    include Helper
+    extend Helper
+
+    attr_accessor :sleep_for, :queues, :current_queue, :id
+
+    def initialize(queues, sleep_for=5)
+      setup_queues(queues)
+      @sleep_for = sleep_for
+    end
+
+    def setup_queues(queues)
+      if queues == "*"
+        @queues = Kthxbye.queues.sort
+      elsif queues.include? ?,
+        @queues = queues.split(",").compact
+      else
+        @queues = *queues
+      end
+    end
+
+    def self.find(worker)
+      if exists? worker
+        qs = worker.split(':')[-1].split(",")
+        new_worker = new(*qs)
+        new_worker.id = worker
+        return new_worker
+      else
+        nil
+      end
+    end
+
+    def self.exists?(id)
+      redis.sismember( :workers, id )
+    end
+
+    # gets the job a given worker is working on
+    # returns a hash with the 'job_id' and the 'started' time
+    def self.working_on(id)
+      decode( redis.get( "worker:#{id}" ) )
+    end
+
+    # major run loop. workhorse of a worker... sort of.
+    # in the end, this loop simply runs the jobs in separate
+    # processes by forking out the process then waiting for it
+    # to return. we only process one
+    def run(&block)
+      log "Starting Kthxbye::Worker #{self}"
+      startup
+
+      loop do
+        break if @terminate
+
+        if !@paused and job = grab_job
+          log "Found job #{job}"
+          working(job)
+
+          @child = fork {
+            log "Forking..."
+            result = job.perform
+            yield job if block_given?
+            exit!
+          }
+
+          Process.wait
+          done
+        else
+          break if @sleep_for == 0
+          log "No jobs on #{@queues} - sleeping for #{@sleep_for}"
+          sleep sleep_for.to_i
+        end
+      end
+    ensure
+      unregister_worker
+    end
+
+    def queues
+      @queues.sort
+    end
+
+    # startup actions
+    def startup
+      register_worker
+      register_signals
+    end
+
+    # adds worker to the workers list
+    def register_worker
+      log "Registered worker #{self}"
+      redis.sadd( :workers, self ) if !exists?
+    end
+
+    # removes the worker from our workers list
+    def unregister_worker
+      log "Unregistered worker #{self}"
+      if working?
+        log "Was active. Reporting and rerunning"
+        Failure.create(current_job, ActiveWorkerKilled.new)
+        current_job.rerun
+      end
+
+      redis.del "worker:#{self}"
+      redis.srem :workers, self
+    end
+
+    def current_job
+      return @current_job if @current_job
+      data = decode( redis.get("worker:#{self}") )
+      @current_job = Job.find( data['job_id'], @current_queue )
+    end
+
+    # start working actions
+    def working(job)
+      redis.sadd( :working, self )
+
+      data = encode( {:job_id => job.id, :started => Time.now.to_s} )
+      redis.set("worker:#{self}", data)
+      @current_job = job
+
+      # activates job
+      job.active
+    end
+
+    # must be in working list and have a current job
+    def working?
+      redis.sismember( :working, self )
+    end
+
+    # job complete actions
+    def done
+      redis.srem( :working, self )
+      redis.del( "worker:#{self}" )
+      log "Completed job #{@current_job}"
+      @current_job = nil
+    end
+
+    #
+    # thanks to http://github.com/defunkt/resque/blob/master/lib/resque/worker.rb for these signals
+    #
+    def register_signals
+      trap('TERM') { shutdown! }
+      trap('INT') { shutdown! }
+
+      begin
+        trap('QUIT') { shutdown }
+        trap('USR1') { shutdown }
+        trap('USR2') { log "Paused"; @paused = true }
+        trap('CONT') { log "Unpaused"; @paused = false }
+      rescue ArgumentError
+        warn "Signals QUIT, USR1, USR2, and/or CONT not supported."
+      end
+
+      log "Registered signals"
+    end
+
+    def shutdown
+      log "Shutting down worker #{self}"
+      @terminate = true
+    end
+
+    def shutdown!
+      kill_child
+      shutdown
+    end
+
+    def kill_child
+      if @child
+        log "Killing child at #{@child}"
+        if system("ps -o pid,state -p #{@child}")
+          Process.kill("KILL", @child) rescue nil
+        else
+          log "Child #{@child} not found, restarting."
+          shutdown
+        end
+      end
+    end
+
+    def grab_job
+      job = nil
+      @queues.each do |q|
+        @current_queue = q
+        log "Checking \"#{q}\" queue for jobs"
+        job = Kthxbye.salvage(q)
+        break unless job.nil?
+      end
+
+      return job || false
+    end
+
+    def exists?
+      redis.sismember( :workers, self )
+    end
+
+    def hostname
+      @hostname ||= `hostname`.chomp
+    end
+
+    def pid
+      Process.pid
+    end
+
+    def id
+      @id ||= "#{hostname}:#{pid}:#{queues.join(",")}"
+    end
+    alias_method :to_s, :id
+
+    def inspect
+      "#<Worker: #{@id}>"
+    end
+
+    def ==(other)
+      to_s == other.to_s
+    end
+
+    def <=>(other)
+      to_s <=> other.to_s
+    end
+
+  end
+end
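The handlers installed by `register_signals` make a running worker controllable from outside the process. A sketch, assuming you already know the worker's pid (it is the middle field of the worker id, `hostname:pid:queues`; the 12345 below is illustrative):

    pid = 12345  # illustrative; read it from the worker id or your process list

    Process.kill("USR2", pid)  # pause: the worker stops picking up new jobs
    Process.kill("CONT", pid)  # unpause and resume polling
    Process.kill("QUIT", pid)  # graceful stop: the current forked job finishes first
    Process.kill("TERM", pid)  # hard stop: the forked child job is killed before shutdown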
data/test/helper.rb
ADDED
@@ -0,0 +1,18 @@
+require 'rubygems'
+require 'bundler'
+begin
+  Bundler.setup(:default, :development)
+rescue Bundler::BundlerError => e
+  $stderr.puts e.message
+  $stderr.puts "Run `bundle install` to install missing gems"
+  exit e.status_code
+end
+require 'test/unit'
+require 'shoulda'
+
+$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+$LOAD_PATH.unshift(File.dirname(__FILE__))
+require 'kthxbye'
+
+class Test::Unit::TestCase
+end
data/test/redis-test.conf
ADDED
@@ -0,0 +1,115 @@
+# Redis configuration file example
+
+# By default Redis does not run as a daemon. Use 'yes' if you need it.
+# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+daemonize yes
+
+# When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
+# You can specify a custom pid file location here.
+pidfile ./test/redis-test.pid
+
+# Accept connections on the specified port, default is 6379
+port 9876
+
+# If you want you can bind a single interface, if the bind option is not
+# specified all the interfaces will listen for connections.
+#
+# bind 127.0.0.1
+
+# Close the connection after a client is idle for N seconds (0 to disable)
+timeout 300
+
+# Save the DB on disk:
+#
+# save <seconds> <changes>
+#
+# Will save the DB if both the given number of seconds and the given
+# number of write operations against the DB occurred.
+#
+# In the example below the behaviour will be to save:
+# after 900 sec (15 min) if at least 1 key changed
+# after 300 sec (5 min) if at least 10 keys changed
+# after 60 sec if at least 10000 keys changed
+save 900 1
+save 300 10
+save 60 10000
+
+# The filename where to dump the DB
+dbfilename dump.rdb
+
+# For default save/load DB in/from the working directory
+# Note that you must specify a directory not a file name.
+dir ./test/
+
+# Set server verbosity to 'debug'
+# it can be one of:
+# debug (a lot of information, useful for development/testing)
+# notice (moderately verbose, what you want in production probably)
+# warning (only very important / critical messages are logged)
+loglevel debug
+
+# Specify the log file name. Also 'stdout' can be used to force
+# the demon to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile stdout
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+################################# REPLICATION #################################
+
+# Master-Slave replication. Use slaveof to make a Redis instance a copy of
+# another Redis server. Note that the configuration is local to the slave
+# so for example it is possible to configure the slave to save the DB with a
+# different interval, or to listen to another port, and so on.
+
+# slaveof <masterip> <masterport>
+
+################################## SECURITY ###################################
+
+# Require clients to issue AUTH <PASSWORD> before processing any other
+# commands. This might be useful in environments in which you do not trust
+# others with access to the host running redis-server.
+#
+# This should stay commented out for backward compatibility and because most
+# people do not need auth (e.g. they run their own servers).
+
+# requirepass foobared
+
+################################### LIMITS ####################################
+
+# Set the max number of connected clients at the same time. By default there
+# is no limit, and it's up to the number of file descriptors the Redis process
+# is able to open. The special value '0' means no limts.
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+
+# maxclients 128
+
+# Don't use more memory than the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys with an
+# EXPIRE set. It will try to start freeing keys that are going to expire
+# in little time and preserve keys with a longer time to live.
+# Redis will also try to remove objects from free lists if possible.
+#
+# If all this fails, Redis will start to reply with errors to commands
+# that will use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to most read-only commands like GET.
+#
+# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
+# 'state' server or cache, not as a real DB. When Redis is used as a real
+# database the memory usage will grow over the weeks, it will be obvious if
+# it is going to use too much memory in the long run, and you'll have the time
+# to upgrade. With maxmemory after the limit is reached you'll start to get
+# errors for write operations, and this may even lead to DB inconsistency.
+
+# maxmemory <bytes>
+
+############################### ADVANCED CONFIG ###############################
+
+# Glue small output buffers together in order to send small replies in a
+# single TCP packet. Uses a bit more CPU but most of the times it is a win
+# in terms of number of queries per second. Use 'yes' if unsure.
+glueoutputbuf yes
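This config is what test_helper.rb (further down) boots before the tests run: a daemonized Redis on port 9876 dumping into ./test/. If you want to poke at that instance by hand, a sketch assuming the redis-rb gem is installed:

    require 'redis'

    # Connect to the test server started from test/redis-test.conf.
    test_redis = Redis.new(:host => "127.0.0.1", :port => 9876)
    test_redis.ping      # => "PONG" once the daemonized test server is up
    test_redis.flushall  # wipe test data, as the test setup blocks do via Kthxbye.redis.flushall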
data/test/test_failure.rb
ADDED
@@ -0,0 +1,51 @@
+require 'test_helper'
+
+class TestFailure < Test::Unit::TestCase
+  context "See Kthxbye Failures" do
+    setup do
+      Kthxbye.redis.flushall
+    end
+
+    should "create a failure" do
+      id = Kthxbye.enqueue("test", SimpleJob, 1, 2)
+      assert Kthxbye::Failure.create(Kthxbye::Job.find(id, "test"), Exception.new("Test!"))
+
+      assert_not_nil f = Kthxbye::Failure.all.first
+      assert_equal "Exception", f['type']
+      assert_equal "Test!", f['error']
+      assert_equal id, f['job']
+    end
+
+    should "insert and clear an exception" do
+      id = Kthxbye.enqueue("test", SimpleJob, 1, 2)
+      Kthxbye::Failure.create(Kthxbye::Job.find(id, "test"), Exception.new("Test!"))
+
+
+      assert_equal 1, Kthxbye::Failure.all.size
+      assert Kthxbye::Failure.clear_exception(id)
+      assert_equal 0, Kthxbye::Failure.all.size
+    end
+
+    should "retry a job that failed" do
+      id = Kthxbye.enqueue("test", BadJob)
+      worker = Kthxbye::Worker.new("test", 0)
+      worker.run
+
+      assert_equal 1, Kthxbye::Failure.all.size
+      assert_equal 1, Kthxbye::Failure.find(id)['attempts']
+
+      Kthxbye::Job.find(id, "test").rerun
+      assert_equal 1, Kthxbye.size("test")
+      worker.run do
+        assert_equal Kthxbye::Job.find(id, "test"), worker.current_job
+      end
+
+      # note, we only store one error for a job failure. we will increment the failure count
+      assert_equal 1, Kthxbye::Failure.all.size
+      assert_equal 2, Kthxbye::Failure.find(id)['attempts']
+
+    end
+
+
+  end
+end
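Beyond the assertions above, the same Failure API can be used to inspect and recover failed jobs by hand. A rough sketch, where job_id stands for an id returned by an earlier Kthxbye.enqueue and "test" is the queue it was enqueued on:

    # Each recorded failure carries the failing 'job' id, the exception 'type',
    # and its 'error' message, as the first test above asserts.
    Kthxbye::Failure.all.each do |f|
      puts "job #{f['job']}: #{f['type']} - #{f['error']}"
    end

    Kthxbye::Failure.find(job_id)['attempts']   # how many times this job has failed
    Kthxbye::Job.find(job_id, "test").rerun     # put the failed job back on its queue
    Kthxbye::Failure.clear_exception(job_id)    # drop its stored failure record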
data/test/test_helper.rb
ADDED
@@ -0,0 +1,86 @@
+#
+# setup code used and modified from http://github.com/defunkt/resque/blob/master/test/test_helper.rb
+#
+require 'rubygems'
+require 'test/unit'
+require 'shoulda'
+
+$LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+$LOAD_PATH.unshift(File.dirname(__FILE__))
+require 'kthxbye'
+
+dir = File.dirname(File.expand_path(__FILE__))
+require 'test/unit'
+require 'rubygems'
+
+
+# make sure we can run redis
+#
+
+if !system("which redis-server")
+  puts '', "** can't find `redis-server` in your path"
+  puts "** try running `sudo rake install`"
+  abort ''
+end
+
+
+#
+# start our own redis when the tests start,
+# kill it when they end
+#
+
+at_exit do
+  next if $!
+
+  if defined?(MiniTest)
+    exit_code = MiniTest::Unit.new.run(ARGV)
+  else
+    exit_code = Test::Unit::AutoRunner.run
+  end
+
+  pid = `ps -A -o pid,command | grep [r]edis-test`.split(" ")[0]
+  puts "Killing test redis server..."
+  `rm -f #{dir}/dump.rdb`
+  Process.kill("KILL", pid.to_i)
+  exit exit_code
+end
+
+puts "Starting redis for testing at localhost:9876..."
+`redis-server #{dir}/redis-test.conf`
+
+class SimpleJob
+  def self.perform(p1, p2)
+  end
+end
+
+class SomeQueueJob < SimpleJob
+  @queue = :test
+end
+
+class BadJob
+  def self.perform
+    raise "Bad job!"
+  end
+end
+
+class GoodJob
+  def self.perform(name)
+    "Good job, #{name}"
+  end
+end
+
+class LongJob
+  def self.perform(data)
+    sleep 10
+    puts "I just slept for 10 seconds with #{data} keeping me company"
+    data.gsub(" ", "_")
+  end
+end
+
+
+class BadJobWithSyntaxError
+  def self.perform
+    raise SyntaxError, "Extra Bad job!"
+  end
+end
+