rq-ruby1.8 3.4.3
Sign up to get free protection for your applications and to get access to all the features.
- data/Gemfile +22 -0
- data/Gemfile.lock +22 -0
- data/INSTALL +166 -0
- data/LICENSE +10 -0
- data/Makefile +6 -0
- data/README +1183 -0
- data/Rakefile +37 -0
- data/TODO +24 -0
- data/TUTORIAL +230 -0
- data/VERSION +1 -0
- data/bin/rq +902 -0
- data/bin/rqmailer +865 -0
- data/example/a.rb +7 -0
- data/extconf.rb +198 -0
- data/gemspec.rb +40 -0
- data/install.rb +210 -0
- data/lib/rq.rb +155 -0
- data/lib/rq/arrayfields.rb +371 -0
- data/lib/rq/backer.rb +31 -0
- data/lib/rq/configfile.rb +82 -0
- data/lib/rq/configurator.rb +40 -0
- data/lib/rq/creator.rb +54 -0
- data/lib/rq/cron.rb +144 -0
- data/lib/rq/defaultconfig.txt +5 -0
- data/lib/rq/deleter.rb +51 -0
- data/lib/rq/executor.rb +40 -0
- data/lib/rq/feeder.rb +527 -0
- data/lib/rq/ioviewer.rb +48 -0
- data/lib/rq/job.rb +51 -0
- data/lib/rq/jobqueue.rb +947 -0
- data/lib/rq/jobrunner.rb +110 -0
- data/lib/rq/jobrunnerdaemon.rb +193 -0
- data/lib/rq/lister.rb +47 -0
- data/lib/rq/locker.rb +43 -0
- data/lib/rq/lockfile.rb +564 -0
- data/lib/rq/logging.rb +124 -0
- data/lib/rq/mainhelper.rb +189 -0
- data/lib/rq/orderedautohash.rb +39 -0
- data/lib/rq/orderedhash.rb +240 -0
- data/lib/rq/qdb.rb +733 -0
- data/lib/rq/querier.rb +98 -0
- data/lib/rq/rails.rb +80 -0
- data/lib/rq/recoverer.rb +28 -0
- data/lib/rq/refresher.rb +80 -0
- data/lib/rq/relayer.rb +283 -0
- data/lib/rq/resource.rb +22 -0
- data/lib/rq/resourcemanager.rb +40 -0
- data/lib/rq/resubmitter.rb +100 -0
- data/lib/rq/rotater.rb +98 -0
- data/lib/rq/sleepcycle.rb +46 -0
- data/lib/rq/snapshotter.rb +40 -0
- data/lib/rq/sqlite.rb +286 -0
- data/lib/rq/statuslister.rb +48 -0
- data/lib/rq/submitter.rb +113 -0
- data/lib/rq/toucher.rb +182 -0
- data/lib/rq/updater.rb +94 -0
- data/lib/rq/usage.rb +1222 -0
- data/lib/rq/util.rb +304 -0
- data/rdoc.sh +17 -0
- data/rq-ruby1.8.gemspec +120 -0
- data/test/.gitignore +1 -0
- data/test/test_rq.rb +145 -0
- data/white_box/crontab +2 -0
- data/white_box/joblist +8 -0
- data/white_box/killrq +18 -0
- data/white_box/rq_killer +27 -0
- metadata +208 -0
@@ -0,0 +1,48 @@
|
|
1
|
+
unless defined? $__rq_statuslister__
module RQ
#--{{{
  LIBDIR = File::dirname(File::expand_path(__FILE__)) + File::SEPARATOR unless
    defined? LIBDIR

  require LIBDIR + 'mainhelper'

  #
  # the StatusLister class dumps a yaml report on stdout showing how many jobs
  # are in each of the states
  # * pending
  # * holding
  # * running
  # * finished
  # * dead
  #
  class StatusLister < MainHelper
#--{{{
    # entry point for the 'status' mode: binds the queue and prints its
    # status summary (optionally augmented by a user supplied exit code
    # mapping) as yaml on stdout
    def statuslist
#--{{{
      set_q
      exit_code_map = parse_exit_code_map @options['exit']
      puts @q.status('exit_code_map' => exit_code_map).to_yaml
#--}}}
    end
    # parses a mapping spec like 'ok=42,43 command_not_found=127' into a
    # hash of name => [integer exit codes].  a nil spec falls back to the
    # default 'ok=42'.  raises with a descriptive message on any malformed
    # token (non-integer codes etc.)
    def parse_exit_code_map emap = 'ok=42'
      emap ||= 'ok=42'
      map = {}
      begin
        # normalize whitespace so 'a = 1' and 'a=1' parse identically,
        # then split on whitespace into name=code,code tokens
        tokens = emap.strip.gsub(%r/\s+/, ' ').gsub(%r/\s*=\s*/, '=').split
        tokens.each do |token|
          key, *values = token.split %r/[=,]/
          values.map!{|value| Integer value}
          map[key.to_s] = values
        end
      rescue => e
        warn{ e }
        # fixed: message previously read "bad map <#{ emap }" with no
        # closing '>' - all other diagnostics in this gem balance <...>
        raise "bad map <#{ emap }>"
      end
      map
    end
#--}}}
  end # class StatusLister
#--}}}
end # module RQ
$__rq_statuslister__ = __FILE__
end
|
data/lib/rq/submitter.rb
ADDED
@@ -0,0 +1,113 @@
|
|
1
|
+
unless defined? $__rq_submitter__
module RQ
#--{{{
  LIBDIR = File::dirname(File::expand_path(__FILE__)) + File::SEPARATOR unless
    defined? LIBDIR

  require LIBDIR + 'mainhelper'
  require LIBDIR + 'job'

  #
  # the Submitter class is responsible for submitting commands to the queue,
  # the commands it submits are taken from the command line, stdin, or the
  # specified infile. the format of commands read from stdin or file is
  # either a simple list of commands, one per line, where blank lines are
  # ignored OR it is valid yaml input. if the Submitter sees the token
  # '---' in the input stream it is assumed the input is yaml. for an
  # example of valid yaml input examine the output of a Lister using
  #
  #   rq q list
  #
  # the output of other commands, such as that of a Querier may also be used
  # as input to submit
  #
  class Submitter < MainHelper
#--{{{
    # entry point for the 'submit' mode.  gathers jobs from argv, the
    # --infile option, and/or stdin, stamps each job with the requested
    # attributes, and hands the batch to the queue in one call.
    def submit
#--{{{
      set_q

      # snapshot every submit-relevant option into an ivar, logging each
      # value as it is captured
      @priority = @options['priority']
      debug{ "priority <#{ @priority }>" }

      @tag = @options['tag']
      debug{ "tag <#{ @tag }>" }

      @runner = @options['runner']
      debug{ "runner <#{ @runner }>" }

      @restartable = @options['restartable']
      debug{ "restartable <#{ @restartable }>" }

      @infile = @options['infile']
      debug{ "infile <#{ @infile }>" }

      @job_stdin = @options['stdin']
      debug{ "job_stdin <#{ @job_stdin }>" }

      @stage = @options['stage']
      debug{ "stage <#{ @stage }>" }

      @data = @options['data']
      debug{ "data <#{ @data }>" }

      # NOTE(review): job_stdin/stdin? appear to come from MainHelper -
      # stdin cannot serve as both the job list and the jobs' input stream
      if job_stdin == '-' and stdin?
        abort "cannot specify both jobs and job input on stdin"
      end

      jobs = []

      # any remaining argv words form a single inline command
      unless @argv.empty?
        job = Job::new
        job['command'] = @argv.join(' ')
        job['priority'] = @priority
        job['tag'] = @tag
        job['runner'] = @runner
        job['restartable'] = @restartable
        jobs << job
      end

      # --infile contributes additional jobs (plain list or yaml)
      if @infile
        open(@infile) do |fd|
          debug{ "reading jobs from <#{ @infile }>" }
          loadio fd, @infile, jobs
        end
      end

      # fall back to stdin only when nothing else produced jobs
      if jobs.empty? and stdin?
        debug{ "reading jobs from <stdin>" }
        loadio stdin, 'stdin', jobs
      end

      abort "no jobs specified!" if jobs.empty?

      init_job_stdin!

      # --stage parks jobs in 'holding' so no feeder runs them yet
      state = @stage ? 'holding' : 'pending'

      # stamp every job; explicit options override whatever came from the
      # input stream, absent options leave the parsed values untouched
      jobs.each do |job|
        job['state'] = state
        job['priority'] = @priority if @options.has_key?('priority')
        job['tag'] = @tag if @options.has_key?('tag')
        job['runner'] = @runner if @options.has_key?('runner')
        job['restartable'] = @restartable if @options.has_key?('restartable')
        job['stdin'] = @job_stdin if @job_stdin
        job['data'] = @data if @data
      end

      # --quiet suppresses the yaml echo of submitted tuples
      if @options['quiet']
        @q.submit(*jobs)
      else
        @q.submit(*jobs, &dumping_yaml_tuples)
      end

      jobs = nil
      self
#--}}}
    end
#--}}}
  end # class Submitter
#--}}}
end # module RQ
$__rq_submitter__ = __FILE__
end
|
data/lib/rq/toucher.rb
ADDED
@@ -0,0 +1,182 @@
|
|
1
|
+
unless defined? $__rq_toucher__
module RQ
#--{{{
  LIBDIR = File::dirname(File::expand_path(__FILE__)) + File::SEPARATOR unless
    defined? LIBDIR

  require LIBDIR + 'mainhelper'
  require LIBDIR + 'job'

  #
  # the Toucher 'upserts' jobs: for each input job it either updates a
  # matching pending job (same command + tag) in place or submits it as a
  # new job, all inside a single queue transaction
  #
  class Toucher < MainHelper
#--{{{
    def touch
#--{{{
      set_q

      # capture submit-style options, logging each value
      @priority = @options['priority']
      debug{ "priority <#{ @priority }>" }

      @tag = @options['tag']
      debug{ "tag <#{ @tag }>" }

      @runner = @options['runner']
      debug{ "runner <#{ @runner }>" }

      @restartable = @options['restartable']
      debug{ "restartable <#{ @restartable }>" }

      @infile = @options['infile']
      debug{ "infile <#{ @infile }>" }

      @job_stdin = @options['stdin']
      debug{ "job_stdin <#{ @job_stdin }>" }

      @stage = @options['stage']
      debug{ "stage <#{ @stage }>" }

      @data = @options['data']
      debug{ "data <#{ @data }>" }

      if job_stdin == '-' and stdin?
        abort "cannot specify both jobs and job input on stdin"
      end

      jobs = []

      unless @argv.empty?
        job = Job::new
        job['command'] = @argv.join(' ')
        job['priority'] = @priority
        job['tag'] = @tag
        job['runner'] = @runner
        job['restartable'] = @restartable
        job['data'] = @data
        jobs << job
      end

      if @infile
        open(@infile) do |f|
          debug{ "reading jobs from <#{ @infile }>" }
          loadio f, @infile, jobs
        end
      end

      if jobs.empty? and stdin?
        debug{ "reading jobs from <stdin>" }
        loadio stdin, 'stdin', jobs
      end

      abort "no jobs specified!" if jobs.empty?

      init_job_stdin!

      state = @stage ? 'holding' : 'pending'

      jobs.each do |job|
        job['state'] = state
        job['priority'] = @priority if @options.has_key?('priority')
        job['tag'] = @tag if @options.has_key?('tag')
        job['runner'] = @runner if @options.has_key?('runner')
        job['restartable'] = @restartable if @options.has_key?('restartable')
        job['stdin'] = @job_stdin if @job_stdin
        job['data'] = @data if @data
      end

      #
      # state + lambdas for submit process...
      #

      list = []

      # unique scratch file per call; assumes Tempfile has been required
      # by a lower layer (mainhelper) - TODO confirm
      tmpfile =
        lambda do |basename|
          basename = File.basename basename.to_s
          Tempfile.new "#{ basename }_#{ Process.pid }_#{ rand.to_s }"
        end

      # merge an update job 'ujob' into an existing pending job 'pjob':
      # concatenates any pending + update stdin into one file and applies
      # the whitelisted attributes via @q.update
      update_job =
        lambda do |pjob, ujob|
          kvs, jid = {}, pjob['jid']
          # handle stdin
          pstdin, ustdin = pjob['stdin'], ujob['stdin']
          if pstdin || ustdin
            pbuf =
              if pstdin
                pstdin = @q.standard_in_4 jid
                IO.read pstdin if test ?e, pstdin
              end
            ubuf =
              if ustdin
                IO.read ustdin if test ?e, ustdin
              end
            f = ustdin ? open(ustdin,'w') : tmpfile[ustdin]
            begin
              f.write pbuf if pbuf
              # fixed: was 'f.write ubuf if pbuf', which dropped the
              # update-job stdin whenever the pending job had none and
              # never guarded on ubuf itself
              f.write ubuf if ubuf
            ensure
              f.close
            end
            kvs['stdin'] = ujob['stdin'] = f.path
          end
          # handle other keys
          allowed = %w( priority runner restartable )
          allowed.each do |key|
            val = ujob[key]
            kvs[key] = val if val
          end
          @q.update(kvs, jid){|updated| list << updated}
        end

      submit_job =
        lambda do |job|
          @q.submit(job){|submitted| list << submitted}
        end


      #
      # update or submit
      #
      @q.transaction do
        pending = @q.list 'pending'

        # index pending jobs by jid and by [command, tag] for matching
        pjobs, pcommands = {}, {}

        pending.each do |job|
          jid = job['jid']
          command = job['command'].strip
          tag = job['tag'].to_s.strip
          pjobs[jid] = job
          pcommands[[command, tag]] = jid
        end

        jobs.each do |job|
          jid = job['jid']
          command = job['command'].strip
          tag = job['tag'].to_s.strip
          if((jid = pcommands[[command, tag]]))
            update_job[ pjobs[jid], job ]
          else
            submit_job[ job ]
          end
        end
      end

      list.each &dumping_yaml_tuples unless @options['quiet']

      jobs = nil
      list = nil
      self
#--}}}
    end
#--}}}
  end # class Toucher
#--}}}
end # module RQ
$__rq_toucher__ = __FILE__
end
|
data/lib/rq/updater.rb
ADDED
@@ -0,0 +1,94 @@
|
|
1
|
+
unless defined? $__rq_updater__
module RQ
#--{{{
  LIBDIR = File::dirname(File::expand_path(__FILE__)) + File::SEPARATOR unless
    defined? LIBDIR

  require LIBDIR + 'mainhelper'

  #
  # the Updater class reads jids from the command line and then looks for
  # key=value pairs on the command line, stdin, or from infile. the jids are
  # taken to be jids to update with the key=values pairs scanned
  #
  class Updater < MainHelper
#--{{{
    def update
#--{{{
      set_q
      jids = []
      kvs = {}

      kvs.update "stdin" => job_stdin if job_stdin?

      #
      # scan argv for jids to update
      #
      list, @argv = @argv.partition{|arg| arg =~ %r/^\s*(?:jid\s*=\s*)?\d+\s*$/}
      list.each{|elem| jids << Integer(elem[%r/\d+/])}
      # fixed: groups were written as p(?:ending)/h(?:olding) without the
      # trailing '?', which only matched the full words and made the groups
      # pointless - the gem's documented abbreviations are p => pending and
      # h => holding.  abbreviations are normalized to the full state name.
      list, @argv = @argv.partition{|arg| arg =~ %r/^\s*(?:p(?:ending)?|h(?:olding)?)\s*$/}
      list.each{|elem| jids << (elem.strip.downcase[0,1] == 'p' ? 'pending' : 'holding')}
      #
      # scan argv for key=val pairs
      #
      keyeqpat = %r/\s*([^\s=]+)\s*=\s*([^\s]*)\s*$/
      list, @argv = @argv.partition{|arg| arg =~ keyeqpat}
      list.each do |elem|
        m = elem.match(keyeqpat)
        k, v = m[1], m[2]
        k = (k.empty? ? nil : k.strip)
        v = (v.empty? ? nil : v.strip)
        # the literal words nil/null/nul mean "clear this key"
        v =
          case v
            when %r/^\s*(?:nil|null?)\s*$/io
              nil
            else
              v
          end
        kvs[k] = v
      end

      unless @argv.empty?
        raise "don't know what to do with crap arguments <#{ @argv.join ' ' }>"
      end

      #
      # scan stdin for jids to update iff in pipeline
      #
      if stdin?
        #pat = %r/^(?:\s*jid\s*:)?\s*(\d+)\s*$/io
        while((line = stdin.gets))
          case line
            when %r/^(?:\s*jid\s*:)?\s*(\d+)\s*$/io
              jids << Integer($1)
            when %r/^\s*p(?:ending)?\s*$/io
              jids << 'pending'
            when %r/^\s*h(?:olding)?\s*$/io
              jids << 'holding'
            else
              next
          end
        end
      end
      #jids.map!{|jid| jid =~ %r/^\s*\d+\s*$/o ? Integer(jid) : jid}
      #raise "no jids" if jids.empty?
      #
      # if no jids were specified simply update ALL pending and holding jobs
      #
      jids << 'pending' << 'holding' if jids.empty?
      #
      # apply the update
      #
      if @options['quiet']
        @q.update(kvs,*jids)
      else
        @q.update(kvs,*jids, &dumping_yaml_tuples)
      end
#--}}}
    end
#--}}}
  end # class Updater
#--}}}
end # module RQ
$__rq_updater__ = __FILE__
end
|
data/lib/rq/usage.rb
ADDED
@@ -0,0 +1,1222 @@
|
|
1
|
+
unless defined? $__rq_usage__
|
2
|
+
module RQ
|
3
|
+
#--{{{
|
4
|
+
LIBDIR = File::dirname(File::expand_path(__FILE__)) + File::SEPARATOR unless
|
5
|
+
defined? LIBDIR
|
6
|
+
|
7
|
+
require LIBDIR + 'util'
|
8
|
+
|
9
|
+
#
|
10
|
+
# the reasons this is pulled off into it's own module are
|
11
|
+
# * it's really big
|
12
|
+
# * it totally wrecks vim's syntax highlighting
|
13
|
+
#
|
14
|
+
module Usage
|
15
|
+
#--{{{
|
16
|
+
# fetch the named constant from the receiver's klass, answering nil
# (rather than raising) when the constant is not defined
def cget const
  klass::const_get const
rescue NameError
  nil
end
|
25
|
+
# render a usage message onto 'port' (an IO-ish sink, STDERR by default).
# with the 'long' option and a USAGE constant present, the full text is
# emitted; otherwise USAGE_BANNER or a generic one-liner is used.  an
# OPTSPEC constant, when defined, is formatted into an OPTIONS section and
# an EXAMPLES constant is appended verbatim.  returns the port.
def usage opts = {}
  port = getopt 'port', opts
  long = getopt 'long', opts

  port = STDERR if port.nil?

  # pick the richest available banner text
  if(long and (txt = cget 'USAGE'))
    port << txt << "\n"
  elsif((txt = cget 'USAGE_BANNER'))
    port << txt << "\n"
  else
    port << "#{ $0 } [options]* [args]*" << "\n"
  end

  if((optspec = cget 'OPTSPEC'))
    port << 'OPTIONS' << "\n"
    optspec.each do |entry|
      w_a, w_b, w_c = entry
      # classify up to three words per entry as long flag, short flag,
      # or description (note: 'long' is reused here, shadowing the flag
      # read above - preserved from the original)
      long, short, desc = nil
      [w_a, w_b, w_c].each do |word|
        next unless word
        word.strip!
        case word
          when %r/^--[^-]/o then long = word
          when %r/^-[^-]/o  then short = word
          else                   desc = word
        end
      end
      spec = ((long and short) ? [long, short] : [long])
      if spec
        port << columnize(spec.join(', '), 80, 2)
        port << "\n"
      end
      if desc
        port << columnize(desc, 80, 8)
        port << "\n"
      end
    end
    port << "\n"
  end

  if((txt = cget 'EXAMPLES'))
    port << txt << "\n"
  end

  port
end
module_function :usage
public :usage
|
79
|
+
|
80
|
+
PROGNAM = 'rq'
|
81
|
+
|
82
|
+
# :nodoc
|
83
|
+
USAGE_BANNER =
|
84
|
+
#--{{{
|
85
|
+
<<-usage_banner
|
86
|
+
NAME
|
87
|
+
|
88
|
+
rq v#{ VERSION }
|
89
|
+
|
90
|
+
SYNOPSIS
|
91
|
+
|
92
|
+
rq (queue | export RQ_Q=q) mode [mode_args]* [options]*
|
93
|
+
|
94
|
+
usage_banner
|
95
|
+
#--}}}
|
96
|
+
|
97
|
+
# :nodoc
|
98
|
+
USAGE =
|
99
|
+
#--{{{
|
100
|
+
<<-usage
|
101
|
+
#{ USAGE_BANNER }
|
102
|
+
URIS
|
103
|
+
|
104
|
+
#{ WEBSITE } - main website
|
105
|
+
http://www.linuxjournal.com/article/7922
|
106
|
+
|
107
|
+
and
|
108
|
+
|
109
|
+
http://rubyforge.org/projects/codeforpeople/
|
110
|
+
http://codeforpeople.com/lib/ruby/rq/
|
111
|
+
|
112
|
+
LICENSE
|
113
|
+
|
114
|
+
rq is distributed under the BSD license, see the ./LICENSE file
|
115
|
+
|
116
|
+
INSTALL
|
117
|
+
|
118
|
+
See the ./INSTALL file, but quickly
|
119
|
+
|
120
|
+
gems (per node):
|
121
|
+
|
122
|
+
gem >=3.4.3:
|
123
|
+
|
124
|
+
- install sqlite2 (Debian apt-get install libsqlite0-dev)
|
125
|
+
- wget http://rubyforge.org/frs/download.php/1070/sqlite-1.3.1.gem
|
126
|
+
- gem1.8 install sqlite-1.3.1.gem
|
127
|
+
- gem1.8 install posixlock
|
128
|
+
- gem1.8 install arrayfields
|
129
|
+
- gem1.8 install lockfile
|
130
|
+
- gem1.8 install rq-ruby1.8 (or run from source)
|
131
|
+
|
132
|
+
Also available from http://bio4.dnsalias.net/download/gem/ruby1.8/
|
133
|
+
|
134
|
+
manual (cluster wide):
|
135
|
+
|
136
|
+
(note, this procedure is out of date and breaks on gcc 4.4 and later)
|
137
|
+
- download latest release from http://codeforpeople.com/lib/ruby/rq/
|
138
|
+
- tar xvfz rq-X.X.X.tgz
|
139
|
+
- cd rq-X-X-X
|
140
|
+
- cd all
|
141
|
+
- ./install.sh /full/path/to/nfs/mounted/directory/
|
142
|
+
|
143
|
+
Debian flavours:
|
144
|
+
|
145
|
+
see ./INSTALL file for latest
|
146
|
+
|
147
|
+
DESCRIPTION
|
148
|
+
|
149
|
+
ruby queue (rq) is a zero-admin zero-configuration tool used to create instant
|
150
|
+
unix clusters. rq requires only a central nfs filesystem in order to manage a
|
151
|
+
simple sqlite database as a distributed priority work queue. this simple
|
152
|
+
design allows researchers with minimal unix experience to install and
|
153
|
+
configure, in only a few minutes and without root privileges, a robust unix
|
154
|
+
cluster capable of distributing processes to many nodes - bringing dozens of
|
155
|
+
powerful cpus to their knees with a single blow. clearly this software should
|
156
|
+
be kept out of the hands of free radicals, seti enthusiasts, and one mr. j
|
157
|
+
safran.
|
158
|
+
|
159
|
+
the central concept of rq is that n nodes work in isolation to pull jobs
|
160
|
+
from an centrally mounted nfs priority work queue in a synchronized fashion.
|
161
|
+
the nodes have absolutely no knowledge of each other and all communication
|
162
|
+
is done via the queue meaning that, so long as the queue is available via
|
163
|
+
nfs and a single node is running jobs from it, the system will continue to
|
164
|
+
process jobs. there is no centralized process whatsoever - all nodes work
|
165
|
+
to take jobs from the queue and run them as fast as possible. this creates
|
166
|
+
a system which load balances automatically and is robust in face of node
|
167
|
+
failures.
|
168
|
+
|
169
|
+
although the rq system is simple in it's design it features powerful
|
170
|
+
functionality such as priority management, predicate and sql query, compact
|
171
|
+
streaming command-line processing, programmable api, hot-backup, and
|
172
|
+
input/capture of the stdin/stdout/stderr io streams of remote jobs. to date
|
173
|
+
rq has had no reported runtime failures and is in operation at
|
174
|
+
dozens of research centers around the world. while rq is written in
|
175
|
+
the Ruby programming language, there is no Ruby programming
|
176
|
+
involved in using rq.
|
177
|
+
|
178
|
+
INVOCATION
|
179
|
+
|
180
|
+
the first argument to any rq command is the always the name of the queue
|
181
|
+
while the second is the mode of operation. the queue name may be omitted
|
182
|
+
if, and only if, the environment variable RQ_Q has been set to contain the
|
183
|
+
absolute path of target queue.
|
184
|
+
|
185
|
+
for instance, the command
|
186
|
+
|
187
|
+
~ > rq queue list
|
188
|
+
|
189
|
+
is equivalent to
|
190
|
+
|
191
|
+
~ > export RQ_Q=queue
|
192
|
+
~ > rq list
|
193
|
+
|
194
|
+
this facility can be used to create aliases for several queues, for example,
|
195
|
+
a .bashrc containing
|
196
|
+
|
197
|
+
alias MYQ="RQ_Q=/path/to/myq rq"
|
198
|
+
|
199
|
+
alias MYQ2="RQ_Q=/path/to/myq2 rq"
|
200
|
+
|
201
|
+
would allow syntax like
|
202
|
+
|
203
|
+
MYQ2 submit < joblist
|
204
|
+
|
205
|
+
MODES
|
206
|
+
|
207
|
+
rq operates in modes create, submit, resubmit, list, status, delete, update,
|
208
|
+
query, execute, configure, snapshot, lock, backup, rotate, feed, recover,
|
209
|
+
ioview, cron, help, and a few others. the meaning of 'mode_args' will
|
210
|
+
naturally change depending on the mode of operation.
|
211
|
+
|
212
|
+
the following mode abbreviations exist, note that not all modes have
|
213
|
+
abbreviations
|
214
|
+
|
215
|
+
c => create
|
216
|
+
s => submit
|
217
|
+
r => resubmit
|
218
|
+
l => list
|
219
|
+
ls => list
|
220
|
+
t => status
|
221
|
+
d => delete
|
222
|
+
rm => delete
|
223
|
+
u => update
|
224
|
+
q => query
|
225
|
+
e => execute
|
226
|
+
C => configure
|
227
|
+
S => snapshot
|
228
|
+
L => lock
|
229
|
+
b => backup
|
230
|
+
R => rotate
|
231
|
+
f => feed
|
232
|
+
io => ioview
|
233
|
+
0 => stdin
|
234
|
+
1 => stdout
|
235
|
+
2 => stderr
|
236
|
+
h => help
|
237
|
+
|
238
|
+
|
239
|
+
create, c :
|
240
|
+
|
241
|
+
creates a queue. the queue must be located on an nfs mounted file system
|
242
|
+
visible from all nodes intended to run jobs from it. nfs locking must be
|
243
|
+
functional on this file system.
|
244
|
+
|
245
|
+
examples :
|
246
|
+
|
247
|
+
0) to create a queue
|
248
|
+
~ > rq /path/to/nfs/mounted/q create
|
249
|
+
|
250
|
+
or, using the abbreviation
|
251
|
+
|
252
|
+
~ > rq /path/to/nfs/mounted/q c
|
253
|
+
|
254
|
+
|
255
|
+
submit, s :
|
256
|
+
|
257
|
+
submit jobs to a queue to be proccesed by some feeding node. any
|
258
|
+
'mode_args' are taken as the command to run. note that 'mode_args' are
|
259
|
+
subject to shell expansion - if you don't understand what this means do
|
260
|
+
not use this feature and pass jobs on stdin.
|
261
|
+
|
262
|
+
when running in submit mode a file may by specified as a list of commands
|
263
|
+
to run using the '--infile, -i' option. this file is taken to be a
|
264
|
+
newline separated list of commands to submit, blank lines and comments (#)
|
265
|
+
are allowed. if submitting a large number of jobs the input file method
|
266
|
+
is MUCH, more efficient. if no commands are specified on the command line
|
267
|
+
rq automatically reads them from stdin. yaml formatted files are also
|
268
|
+
allowed as input (http://www.yaml.org/) - note that the output of nearly
|
269
|
+
all rq commands is valid yaml and may, therefore, be piped as input into
|
270
|
+
the submit command. the leading '---' of yaml file may not be omitted.
|
271
|
+
|
272
|
+
when submitting the '--priority, -p' option can be used here to determine
|
273
|
+
the priority of jobs. priorities may be any whole number including
|
274
|
+
negative ones - zero is the default. note that submission of a high
|
275
|
+
priority job will NOT supplant a currently running low priority job, but
|
276
|
+
higher priority jobs WILL always migrate above lower priority jobs in the
|
277
|
+
queue in order that they be run as soon as possible. constant submission
|
278
|
+
of high priority jobs may create a starvation situation whereby low
|
279
|
+
priority jobs are never allowed to run. avoiding this situation is the
|
280
|
+
responsibility of the user. the only guaruntee rq makes regarding job
|
281
|
+
execution is that jobs are executed in an 'oldest-highest-priority' order
|
282
|
+
and that running jobs are never supplanted. jobs submitted with the
|
283
|
+
'--stage' option will not be eligible to be run by any node and will
|
284
|
+
remain in a 'holding' state until updated (see update mode) into the
|
285
|
+
'pending' mode, this option allows jobs to entered, or 'staged', in the
|
286
|
+
queue and then made candidates for running at a later date.
|
287
|
+
|
288
|
+
rq allows the stdin of commands to be provided and also captures the
|
289
|
+
stdout and stderr of any job run (of course standard shell redirects may
|
290
|
+
be used as well) and all three will be stored in a directory relative the
|
291
|
+
the queue itself. the stdin/stdout/stderr files are stored by job id and
|
292
|
+
there location (though relative to the queue) is shown in the output of
|
293
|
+
'list' (see docs for list).
|
294
|
+
|
295
|
+
|
296
|
+
examples :
|
297
|
+
|
298
|
+
0) submit the job ls to run on some feeding host
|
299
|
+
|
300
|
+
~ > rq q s ls
|
301
|
+
|
302
|
+
1) submit the job ls to run on some feeding host, at priority 9
|
303
|
+
|
304
|
+
~ > rq -p9 q s ls
|
305
|
+
|
306
|
+
2) submit a list of jobs from file. note the '-' used to specify
|
307
|
+
reading jobs from stdin
|
308
|
+
|
309
|
+
~ > cat joblist
|
310
|
+
job1.sh
|
311
|
+
job2.sh
|
312
|
+
job2.sh
|
313
|
+
|
314
|
+
~ > rq q submit --infile=joblist
|
315
|
+
|
316
|
+
3) submit a joblist on stdin
|
317
|
+
|
318
|
+
~ > cat joblist | rq q submit -
|
319
|
+
|
320
|
+
or
|
321
|
+
|
322
|
+
~ > rq q submit - <joblist
|
323
|
+
|
324
|
+
4) submit cat as a job, providing the stdin for cat from the file cat.in
|
325
|
+
|
326
|
+
~ > rq q submit cat --stdin=cat.in
|
327
|
+
|
328
|
+
5) submit cat as a job, providing the stdin for the cat job on stdin
|
329
|
+
|
330
|
+
~ > cat cat.in | rq q submit cat --stdin=-
|
331
|
+
|
332
|
+
or
|
333
|
+
|
334
|
+
~ > rq q submit cat --stdin=- <cat.in
|
335
|
+
|
336
|
+
6) submit 42 priority 9 jobs from a command file, marking them as
|
337
|
+
'important' using the '--tag, -t' option.
|
338
|
+
|
339
|
+
~ > wc -l cmdfile
|
340
|
+
42
|
341
|
+
|
342
|
+
~ > rq -p9 -timportant q s < cmdfile
|
343
|
+
|
344
|
+
6) re-submit all the 'important' jobs (see 'query' section below)
|
345
|
+
|
346
|
+
~ > rq q query tag=important | rq q s -
|
347
|
+
|
348
|
+
8) re-submit all jobs which are already finished (see 'list' section
|
349
|
+
below)
|
350
|
+
|
351
|
+
~ > rq q l f | rq q s
|
352
|
+
|
353
|
+
|
354
|
+
9) stage the job wont_run_yet to the queue in a 'holding' state. no
|
355
|
+
feeder will run this job until it's state is upgraded to 'pending'
|
356
|
+
|
357
|
+
~ > rq q s --stage wont_run_yet
|
358
|
+
|
359
|
+
|
360
|
+
resubmit, r :
|
361
|
+
|
362
|
+
resubmit jobs back to a queue to be proccesed by a feeding node. resubmit
|
363
|
+
is essentially equivalent to submitting a job that is already in the queue
|
364
|
+
as a new job and then deleting the original job except that using resubmit
|
365
|
+
is atomic and, therefore, safer and more efficient. resubmission respects
|
366
|
+
any previous stdin provided for job input. read docs for delete and
|
367
|
+
submit for more info.
|
368
|
+
|
369
|
+
examples :
|
370
|
+
|
371
|
+
0) resubmit job 42 to the queue
|
372
|
+
|
373
|
+
~> rq q resubmit 42
|
374
|
+
|
375
|
+
1) resubmit all failed jobs
|
376
|
+
|
377
|
+
~> rq q query exit_status!=0 | rq q resubmit -
|
378
|
+
|
379
|
+
2) resubmit job 4242 with different stdin
|
380
|
+
|
381
|
+
~ rq q resubmit 4242 --stdin=new_stdin.in
|
382
|
+
|
383
|
+
|
384
|
+
list, l, ls :
|
385
|
+
|
386
|
+
list mode lists jobs of a certain state or job id. state may be one of
|
387
|
+
pending, holding, running, finished, dead, or all. any 'mode_args' that
|
388
|
+
are numbers are taken to be job id's to list.
|
389
|
+
|
390
|
+
states may be abbreviated to uniqueness, therefore the following shortcuts
|
391
|
+
apply :
|
392
|
+
|
393
|
+
p => pending
|
394
|
+
h => holding
|
395
|
+
r => running
|
396
|
+
f => finished
|
397
|
+
d => dead
|
398
|
+
a => all
|
399
|
+
|
400
|
+
examples :
|
401
|
+
|
402
|
+
0) show everything in q
|
403
|
+
~ > rq q list all
|
404
|
+
|
405
|
+
or
|
406
|
+
|
407
|
+
~ > rq q l all
|
408
|
+
|
409
|
+
or
|
410
|
+
|
411
|
+
~ > export RQ_Q=q
|
412
|
+
~ > rq l
|
413
|
+
|
414
|
+
1) show q's pending jobs
|
415
|
+
~ > rq q list pending
|
416
|
+
|
417
|
+
2) show q's running jobs
|
418
|
+
~ > rq q list running
|
419
|
+
|
420
|
+
3) show q's finished jobs
|
421
|
+
~ > rq q list finished
|
422
|
+
|
423
|
+
4) show job id 42
|
424
|
+
~ > rq q l 42
|
425
|
+
|
426
|
+
5) show q's holding jobs
|
427
|
+
~ > rq q list holding
|
428
|
+
|
429
|
+
|
430
|
+
status, t :
|
431
|
+
|
432
|
+
status mode shows the global state the queue and statistics on it's the
|
433
|
+
cluster's performance. there are no 'mode_args'. the meaning of each
|
434
|
+
state is as follows:
|
435
|
+
|
436
|
+
pending => no feeder has yet taken this job
|
437
|
+
holding => a hold has been placed on this job, thus no feeder will start
|
438
|
+
it
|
439
|
+
running => a feeder has taken this job
|
440
|
+
finished => a feeder has finished this job
|
441
|
+
dead => rq died while running a job, has restarted, and moved
|
442
|
+
this job to the dead state
|
443
|
+
|
444
|
+
note that rq cannot move jobs into the dead state unless it has been
|
445
|
+
restarted. this is because no node has any knowledge of other nodes and
|
446
|
+
cannot possibly know if a job was started on a node that subsequently
|
447
|
+
died, or that it is simply taking a very long time to complete. only the
|
448
|
+
node that dies, upon restart, can determine that it owns jobs that 'were
|
449
|
+
started before it started running jobs', an impossibility, and move these
|
450
|
+
jobs into the dead state.
|
451
|
+
|
452
|
+
normally only a machine crash would cause a job to be placed into the dead
|
453
|
+
state. dead jobs are automatically restarted if, and only if, the job was
|
454
|
+
submitted with the '--restartable' flag.
|
455
|
+
|
456
|
+
status breaks down a variety of canned statistics about a nodes'
|
457
|
+
performance based solely on the jobs currently in the queue. only one
|
458
|
+
option affects the ouput: '--exit'. this option is used to specify
|
459
|
+
additionaly exit code mappings on which to report. normally rq will
|
460
|
+
report any job with an exit code of 0 as being 'successes' and any job
|
461
|
+
with an exit code that is not 0, or a status of 'dead', as being
|
462
|
+
'failures'. if the '--exit' switch is used then additional mappings can
|
463
|
+
be specified, note that the the semantics for 'successes' and 'failures'
|
464
|
+
does not change - this keyword specifies extra mappings.
|
465
|
+
|
466
|
+
examples :
|
467
|
+
|
468
|
+
0) show q's status
|
469
|
+
|
470
|
+
~ > rq q t
|
471
|
+
|
472
|
+
2) show q's status, consider any exit code of 42 will be listed as 'ok'
|
473
|
+
|
474
|
+
~ > rq q t --exit ok=42
|
475
|
+
|
476
|
+
3) show q's status, consider any exit code of 42 or 43 will be listed as
|
477
|
+
'ok' and 127 will be listed as 'command_not_found'. notice the quoting
|
478
|
+
required.
|
479
|
+
|
480
|
+
~ > rq q t --exit 'ok=42,43 command_not_found=127'
|
481
|
+
|
482
|
+
|
483
|
+
delete, d :
|
484
|
+
|
485
|
+
delete combinations of pending, holding, finished, dead, or jobs specified
|
486
|
+
by jid. the delete mode is capable of parsing the output of list and
|
487
|
+
query modes, making it possible to create custom filters to delete jobs
|
488
|
+
meeting very specific conditions.
|
489
|
+
|
490
|
+
'mode_args' are the same as for list.
|
491
|
+
|
492
|
+
note that it is NOT possible to delete a running job. rq has a
|
493
|
+
decentralized architecture which means that compute nodes are completely
|
494
|
+
independent of one another; an extension is that there is no way to
|
495
|
+
communicate the deletion of a running job from the queue to the node
|
496
|
+
actually running that job. it is not an error to force a job to die
|
497
|
+
prematurely using a facility such as an ssh command spawned on the remote
|
498
|
+
host to kill it. once a job has been noted to have finished, whatever the
|
499
|
+
exit status, it can be deleted from the queue.
|
500
|
+
|
501
|
+
examples :
|
502
|
+
|
503
|
+
0) delete all pending, finished, and dead jobs from a queue
|
504
|
+
|
505
|
+
~ > rq q d all
|
506
|
+
|
507
|
+
1) delete all pending jobs from a queue
|
508
|
+
|
509
|
+
~ > rq q d p
|
510
|
+
|
511
|
+
2) delete all finished jobs from a queue
|
512
|
+
|
513
|
+
~ > rq q d f
|
514
|
+
|
515
|
+
3) delete jobs via hand crafted filter program
|
516
|
+
|
517
|
+
~ > rq q list | yaml_filter_prog | rq q d -
|
518
|
+
|
519
|
+
an example ruby filter program (you have to love this)
|
520
|
+
|
521
|
+
~ > cat yaml_filter_prog
|
522
|
+
require 'yaml'
|
523
|
+
joblist = YAML::load STDIN
|
524
|
+
y joblist.select{|job| job['command'] =~ /bombing_program/}
|
525
|
+
|
526
|
+
this program reads the list of jobs (yaml) from stdin and then dumps
|
527
|
+
only those jobs whose command matches 'bombing_program', which is
|
528
|
+
subsequently piped to the delete command.
|
529
|
+
|
530
|
+
|
531
|
+
update, u :
|
532
|
+
|
533
|
+
update assumes all leading arguments are jids to update with subsequent
|
534
|
+
key=value pairs. currently only the 'command', 'priority', and 'tag'
|
535
|
+
fields of pending jobs can be generically updated and the 'state' field
|
536
|
+
may be toggled between pending and holding.
|
537
|
+
|
538
|
+
examples:
|
539
|
+
|
540
|
+
0) update the priority of job 42
|
541
|
+
|
542
|
+
~ > rq q update 42 priority=7
|
543
|
+
|
544
|
+
1) update the priority of all pending jobs
|
545
|
+
|
546
|
+
~ > rq q update pending priority=7
|
547
|
+
|
548
|
+
2) query jobs with a command matching 'foobar' and update their command
|
549
|
+
to be 'barfoo'
|
550
|
+
|
551
|
+
~ > rq q q "command like '%foobar%'" |\\
|
552
|
+
rq q u command=barfoo
|
553
|
+
|
554
|
+
3) place a hold on jid 2
|
555
|
+
|
556
|
+
~ > rq q u 2 state=holding
|
557
|
+
|
558
|
+
4) place a hold on all jobs with tag=disk_filler
|
559
|
+
|
560
|
+
~ > rq q q tag=disk_filler | rq q u state=holding -
|
561
|
+
|
562
|
+
5) remove the hold on jid 2
|
563
|
+
|
564
|
+
~ > rq q u 2 state=pending
|
565
|
+
|
566
|
+
|
567
|
+
query, q :
|
568
|
+
|
569
|
+
query exposes the database more directly to the user, evaluating the where
|
570
|
+
clause specified on the command line (or read from stdin). this feature
|
571
|
+
can be used to make a fine grained selection of jobs for reporting or as
|
572
|
+
input into the delete command. you must have a basic understanding of SQL
|
573
|
+
syntax to use this feature, but it is fairly intuitive in this limited
|
574
|
+
capacity.
|
575
|
+
|
576
|
+
examples:
|
577
|
+
|
578
|
+
0) show all jobs submitted within a specific 10 minute range
|
579
|
+
|
580
|
+
~ > a='2004-06-29 22:51:00'
|
581
|
+
|
582
|
+
~ > b='2004-06-29 22:51:10'
|
583
|
+
|
584
|
+
~ > rq q query "started >= '$a' and started < '$b'"
|
585
|
+
|
586
|
+
1) shell quoting can be tricky here so input on stdin is also allowed to
|
587
|
+
avoid shell expansion
|
588
|
+
|
589
|
+
~ > cat constraints.txt
|
590
|
+
started >= '2004-06-29 22:51:00' and
|
591
|
+
started < '2004-06-29 22:51:10'
|
592
|
+
|
593
|
+
~ > rq q query < constraints.txt
|
594
|
+
or (same thing)
|
595
|
+
|
596
|
+
~ > cat constraints.txt | rq q query -
|
597
|
+
|
598
|
+
2) this query output might then be used to delete those jobs
|
599
|
+
|
600
|
+
~ > cat constraints.txt | rq q q - | rq q d -
|
601
|
+
|
602
|
+
3) show all jobs which are either finished or dead
|
603
|
+
|
604
|
+
~ > rq q q "state='finished' or state='dead'"
|
605
|
+
|
606
|
+
4) show all jobs which have non-zero exit status
|
607
|
+
|
608
|
+
~ > rq q query exit_status!=0
|
609
|
+
|
610
|
+
5) if you plan to query groups of jobs with some common feature consider
|
611
|
+
using the '--tag, -t' feature of the submit mode which allows a user to
|
612
|
+
tag a job with a user defined string which can then be used to easily
|
613
|
+
query that job group
|
614
|
+
|
615
|
+
~ > rq q submit --tag=my_jobs - < joblist
|
616
|
+
|
617
|
+
~ > rq q query tag=my_jobs
|
618
|
+
|
619
|
+
|
620
|
+
6) in general all but numbers will need to be surrounded by single
|
621
|
+
quotes unless the query is a 'simple' one. a simple query is a query
|
622
|
+
with no boolean operators, no quotes, and where every part of it looks
|
623
|
+
like
|
624
|
+
|
625
|
+
key op value
|
626
|
+
|
627
|
+
with ** NO SPACES ** between key, op, and value. if, and only if,
|
628
|
+
the query is 'simple' rq will construct the where clause
|
629
|
+
appropriately. the operators accepted, and their meanings, are
|
630
|
+
|
631
|
+
= : equivalence : sql =
|
632
|
+
=~ : matches : sql like
|
633
|
+
!~ : not matches : sql not like
|
634
|
+
|
635
|
+
match, in this context, is ** NOT ** a regular expression but a sql
|
636
|
+
style string match. about all you need to know about sql matches is
|
637
|
+
that the '%' char matches anything. multiple simple queries will be
|
638
|
+
joined with boolean 'and'
|
639
|
+
|
640
|
+
this sounds confusing - it isn't. here are some examples of simple
|
641
|
+
queries
|
642
|
+
|
643
|
+
6.a)
|
644
|
+
query :
|
645
|
+
rq q query tag=important
|
646
|
+
|
647
|
+
where_clause :
|
648
|
+
"( tag = 'important' )"
|
649
|
+
|
650
|
+
6.b)
|
651
|
+
query :
|
652
|
+
rq q q priority=6 restartable=true
|
653
|
+
|
654
|
+
where_clause :
|
655
|
+
"( priority = 6 ) and ( restartable = 'true' )"
|
656
|
+
|
657
|
+
6.c)
|
658
|
+
query :
|
659
|
+
rq q q command=~%bombing_job% runner=~%node_1%
|
660
|
+
|
661
|
+
where_clause :
|
662
|
+
"( command like '%bombing_job%') and (runner like '%node_1%')"
|
663
|
+
|
664
|
+
|
665
|
+
execute, e :
|
666
|
+
|
667
|
+
execute mode is to be used by expert users with a knowledge of sql syntax
|
668
|
+
only. it follows the locking protocol used by rq and then allows the user
|
669
|
+
to execute arbitrary sql on the queue. unlike query mode a write lock on
|
670
|
+
the queue is obtained allowing a user to definitively shoot themselves in
|
671
|
+
the foot. for details on a queue's schema the file 'db.schema' in the
|
672
|
+
queue directory should be examined.
|
673
|
+
|
674
|
+
examples :
|
675
|
+
|
676
|
+
0) list all jobs
|
677
|
+
|
678
|
+
~ > rq q execute 'select * from jobs'
|
679
|
+
|
680
|
+
|
681
|
+
configure, C :
|
682
|
+
|
683
|
+
this mode is not supported yet.
|
684
|
+
|
685
|
+
|
686
|
+
snapshot, p :
|
687
|
+
|
688
|
+
snapshot provides a means of taking a snapshot of the q. use this feature
|
689
|
+
when many queries are going to be run; for example when attempting to
|
690
|
+
figure out a complex pipeline command your test queries will not compete
|
691
|
+
with the feeders for the queue's lock. you should use this option
|
692
|
+
whenever possible to avoid lock competition.
|
693
|
+
|
694
|
+
examples:
|
695
|
+
|
696
|
+
0) take a snapshot using default snapshot naming, which is made via the
|
697
|
+
basename of the q plus '.snapshot'
|
698
|
+
|
699
|
+
~ > rq /path/to/nfs/q snapshot
|
700
|
+
|
701
|
+
1) use this snapshot to check status
|
702
|
+
|
703
|
+
~ > rq ./q.snapshot status
|
704
|
+
|
705
|
+
2) use the snapshot to see what's running on which host
|
706
|
+
|
707
|
+
~ > rq ./q.snapshot list running | grep `hostname`
|
708
|
+
|
709
|
+
note that there is also a snapshot option - this option is not the same as
|
710
|
+
the snapshot command. the option can be applied to ANY command. if in
|
711
|
+
effect then that command will be run on a snapshot of the database and the
|
712
|
+
snapshot then immediately deleted. this is really only useful if one were
|
713
|
+
to need to run a command against a very heavily loaded queue and did not
|
714
|
+
wish to wait to obtain the lock. eg.
|
715
|
+
|
716
|
+
0) get the status of a heavily loaded queue
|
717
|
+
|
718
|
+
~ > rq q t --snapshot
|
719
|
+
|
720
|
+
1) same as above
|
721
|
+
|
722
|
+
~ > rq q t -s
|
723
|
+
|
724
|
+
** IMPORTANT **
|
725
|
+
|
726
|
+
a really great way to hang all processing in your queue is to do this
|
727
|
+
|
728
|
+
rq q list | less
|
729
|
+
|
730
|
+
and then leave for the night. you hold a read lock you won't release
|
731
|
+
until less dies. this is what snapshot is made for! use it like
|
732
|
+
|
733
|
+
rq q list -s | less
|
734
|
+
|
735
|
+
now you've taken a snapshot of the queue to list so your locks affect no
|
736
|
+
one.
|
737
|
+
|
738
|
+
|
739
|
+
lock, L :
|
740
|
+
|
741
|
+
lock the queue and then execute an arbitrary shell command. lock mode
|
742
|
+
uses the queue's locking protocol to safely obtain a lock of the specified
|
743
|
+
type and execute a command on the user's behalf. lock type must be one of
|
744
|
+
|
745
|
+
(r)ead | (sh)ared | (w)rite | (ex)clusive
|
746
|
+
|
747
|
+
examples :
|
748
|
+
|
749
|
+
0) get a read lock on the queue and make a backup
|
750
|
+
|
751
|
+
~ > rq q L read -- cp -r q q.bak
|
752
|
+
|
753
|
+
(the '--' is needed to tell rq to stop parsing command line
|
754
|
+
options which allows the '-r' to be passed to the 'cp' command)
|
755
|
+
|
756
|
+
** IMPORTANT **
|
757
|
+
|
758
|
+
this is another fantastic way to freeze your queue - use with care!
|
759
|
+
|
760
|
+
|
761
|
+
backup, b :
|
762
|
+
|
763
|
+
backup mode is exactly the same as getting a read lock on the queue and
|
764
|
+
making a copy of it. this mode is provided as a convenience.
|
765
|
+
|
766
|
+
0) make a backup of the queue using default naming ( qname + timestamp + .bak )
|
767
|
+
|
768
|
+
~ > rq q b
|
769
|
+
|
770
|
+
1) make a backup of the queue as 'q.bak'
|
771
|
+
|
772
|
+
~ > rq q b q.bak
|
773
|
+
|
774
|
+
|
775
|
+
rotate, r :
|
776
|
+
|
777
|
+
rotate mode is conceptually similar to log rolling. normally the list of
|
778
|
+
finished jobs will grow without bound in a queue unless they are manually
|
779
|
+
deleted. rotation is a method of trimming finished jobs from a queue
|
780
|
+
without deleting them. the method used is that the queue is copied to a
|
781
|
+
'rotation'; all jobs that are dead or finished are deleted from the
|
782
|
+
original queue and all pending and running jobs are deleted from the
|
783
|
+
rotation. in this way the rotation becomes a record of the queue's
|
784
|
+
finished and dead jobs at the time the rotation was made.
|
785
|
+
|
786
|
+
0) rotate a queue using default rotation name
|
787
|
+
|
788
|
+
~ > rq q rotate
|
789
|
+
|
790
|
+
1) rotate a queue naming the rotation
|
791
|
+
|
792
|
+
~ > rq q rotate q.rotation
|
793
|
+
|
794
|
+
2) a crontab entry like this could be used to rotate a queue daily
|
795
|
+
|
796
|
+
59 23 * * * rq q rotate `date +q.%Y%m%d`
|
797
|
+
|
798
|
+
|
799
|
+
feed, f :
|
800
|
+
|
801
|
+
take jobs from the queue and run them on behalf of the submitter as
|
802
|
+
quickly as possible. jobs are taken from the queue in an 'oldest highest
|
803
|
+
priority' first order.
|
804
|
+
|
805
|
+
feeders can be run from any number of nodes allowing you to harness the
|
806
|
+
CPU power of many nodes simultaneously in order to more effectively
|
807
|
+
clobber your network, annoy your sysads, and set output raids on fire.
|
808
|
+
|
809
|
+
the most useful method of feeding from a queue is to do so in daemon mode
|
810
|
+
so that if the process loses its controlling terminal it will not exit
|
811
|
+
when you exit your terminal session. use the '--daemon, -d' option to
|
812
|
+
accomplish this. by default only one feeding process per host per queue
|
813
|
+
is allowed to run at any given moment. because of this it is acceptable
|
814
|
+
to start a feeder at some regular interval from a cron entry since, if a
|
815
|
+
feeder is already running, the process will simply exit and otherwise a new
|
816
|
+
feeder will be started. in this way you may keep feeder processing
|
817
|
+
running even across machine reboots without requiring sysad intervention
|
818
|
+
to add an entry to the machine's startup tasks.
|
819
|
+
|
820
|
+
|
821
|
+
examples :
|
822
|
+
|
823
|
+
0) feed from a queue verbosely for debugging purposes, using a minimum
|
824
|
+
and maximum polling time of 2 and 4 respectively. you would NEVER
|
825
|
+
specify polling times this brief except for debugging purposes!!!
|
826
|
+
|
827
|
+
~ > rq q feed -v4 --min_sleep=2 --max_sleep=4
|
828
|
+
|
829
|
+
1) same as above, but viewing the executed sql as it is sent to the
|
830
|
+
database
|
831
|
+
|
832
|
+
~ > RQ_SQL_DEBUG=1 rq q feed -v4 --min_sleep=2 --max_sleep=4
|
833
|
+
|
834
|
+
2) feed from a queue in daemon mode - logging to /home/ahoward/rq.log
|
835
|
+
|
836
|
+
~ > rq q feed --daemon -l/home/$USER/rq.log
|
837
|
+
|
838
|
+
log rolling in daemon mode is automatic so your logs should never
|
839
|
+
need to be deleted to prevent disk overflow.
|
840
|
+
|
841
|
+
|
842
|
+
start :
|
843
|
+
|
844
|
+
the start mode is equivalent to running the feed mode except the --daemon
|
845
|
+
is implied so the process instantly goes into the background. also, if no
|
846
|
+
log (--log) is specified in start mode a default one is used. the default
|
847
|
+
is /home/$USER/$BASENAME_OF_Q.log
|
848
|
+
|
849
|
+
examples :
|
850
|
+
|
851
|
+
0) start a daemon process feeding from q
|
852
|
+
|
853
|
+
~ > rq q start
|
854
|
+
|
855
|
+
1) use something like this sample crontab entry to keep a feeder running
|
856
|
+
forever - it attempts to (re)start every fifteen minutes but exits if
|
857
|
+
another process is already feeding. output is only created when the
|
858
|
+
daemon is started so your mailbox will not fill up with this crontab
|
859
|
+
entry:
|
860
|
+
|
861
|
+
#
|
862
|
+
# crontab.sample
|
863
|
+
#
|
864
|
+
|
865
|
+
*/15 * * * * /path/to/bin/rq /path/to/q start
|
866
|
+
|
867
|
+
an entry like this on every node in your cluster is all that's needed
|
868
|
+
to keep your cluster going - even after a reboot.
|
869
|
+
|
870
|
+
|
871
|
+
shutdown :
|
872
|
+
|
873
|
+
tell a running feeder to finish any pending jobs and then to exit. this
|
874
|
+
is equivalent to sending signal 'SIGTERM' to the process - this is what
|
875
|
+
using 'kill pid' does by default.
|
876
|
+
|
877
|
+
examples :
|
878
|
+
|
879
|
+
0) stop a feeding process, if any, that is feeding from q. allow all
|
880
|
+
jobs to be finished first.
|
881
|
+
|
882
|
+
~ > rq q shutdown
|
883
|
+
|
884
|
+
** VERY IMPORTANT **
|
885
|
+
|
886
|
+
if you are keeping your feeder alive with a crontab entry you'll need to
|
887
|
+
comment it out before doing this or else it will simply re-start!!!
|
888
|
+
|
889
|
+
stop :
|
890
|
+
|
891
|
+
tell any running feeder to stop NOW. this sends signal 'SIGKILL' (-9) to
|
892
|
+
the feeder process. the same warning as for shutdown applies!!!
|
893
|
+
|
894
|
+
examples :
|
895
|
+
|
896
|
+
0) stop a feeding process, if any, that is feeding from q. allow NO
|
897
|
+
jobs to be finished first - exit instantly.
|
898
|
+
|
899
|
+
~ > rq q stop
|
900
|
+
|
901
|
+
cron :
|
902
|
+
|
903
|
+
when given 'start' for 'mode_args' this option automatically adds a
|
904
|
+
crontab entry to keep a feeder alive indefinitely and starts a feeder in
|
905
|
+
the background. this is a shortcut to start a feeder and ensure it stays
|
906
|
+
running forever, even across re-boots.
|
907
|
+
|
908
|
+
'stop' as an argument applies the inverse option: any crontab entry is
|
909
|
+
removed and the daemon shutdown nicely. a second argument of 'hard' will
|
910
|
+
do a stop instead of a shutdown.
|
911
|
+
|
912
|
+
the addition and subtraction of crontab entries is robust, however, if you
|
913
|
+
already have crontab lines maintaining your feeders with a vastly
|
914
|
+
different syntax it would be best to shut down, remove them, and then let
|
915
|
+
rq manage them. then again, some people are quite brave...
|
916
|
+
|
917
|
+
examples :
|
918
|
+
|
919
|
+
0) automatically add crontab entry and start daemon feeder
|
920
|
+
|
921
|
+
~ > rq q cron start
|
922
|
+
|
923
|
+
1) automatically remove crontab entry and shutdown daemon feeder nicely
|
924
|
+
|
925
|
+
~ > rq q cron shutdown
|
926
|
+
|
927
|
+
2) the same, but using stop instead of shutdown
|
928
|
+
|
929
|
+
~ > rq q cron stop
|
930
|
+
|
931
|
+
pid :
|
932
|
+
|
933
|
+
show the pid, if any, of the feeder on this host
|
934
|
+
|
935
|
+
~ > rq q feeder
|
936
|
+
---
|
937
|
+
pid : 3176
|
938
|
+
|
939
|
+
|
940
|
+
ioview, io :
|
941
|
+
|
942
|
+
as shown in the description for submit, a job may be provided stdin
|
943
|
+
during job submission. the stdout and stderr of the job are also captured
|
944
|
+
as the job is run. all three streams are captured in files located
|
945
|
+
relative to the queue. so, if one has submitted a job, and its jid was
|
946
|
+
shown to be 42, by using something like
|
947
|
+
|
948
|
+
~ > rq /path/to/q submit myjob --stdin=myjob.in
|
949
|
+
---
|
950
|
+
-
|
951
|
+
jid : 42
|
952
|
+
priority : 0
|
953
|
+
...
|
954
|
+
stdin : stdin/42
|
955
|
+
stdout : stdout/42
|
956
|
+
stderr : stderr/42
|
957
|
+
...
|
958
|
+
command : myjob
|
959
|
+
|
960
|
+
the stdin file will exist as soon as the job is submitted and the others
|
961
|
+
will exist once the job has begun running. note that these paths are
|
962
|
+
shown relative to the queue. in this case the actual paths would be
|
963
|
+
|
964
|
+
/path/to/q/stdin/42
|
965
|
+
/path/to/q/stdout/42
|
966
|
+
/path/to/q/stderr/42
|
967
|
+
|
968
|
+
but, since our queue is nfs mounted the /path/to/q may or may not be the
|
969
|
+
same on every host. thus the path is a relative one. this can make it
|
970
|
+
annoying to view these files, but rq assists here with the ioview command.
|
971
|
+
the ioview command spawns an external editor to view all three files.
|
972
|
+
its use is quite simple
|
973
|
+
|
974
|
+
examples :
|
975
|
+
|
976
|
+
0) view the stdin/stdout/stderr of job id 42
|
977
|
+
|
978
|
+
~ > rq q ioview 42
|
979
|
+
|
980
|
+
by default this will open up all three files in vim. the editor command
|
981
|
+
can be specified using the '--editor' option or the ENV var RQ_EDITOR.
|
982
|
+
the default value is 'vim -R -o' which allows all three files to be opened
|
983
|
+
in a single window.
|
984
|
+
|
985
|
+
|
986
|
+
stdin, 0 :
|
987
|
+
|
988
|
+
dump the stdinput (if any) provided to the job
|
989
|
+
|
990
|
+
examples :
|
991
|
+
|
992
|
+
0) dump the stdin for jid 42
|
993
|
+
|
994
|
+
~ > rq q stdin 42
|
995
|
+
|
996
|
+
|
997
|
+
stdout, 1 :
|
998
|
+
|
999
|
+
dump the stdoutput (if any) created by the job
|
1000
|
+
|
1001
|
+
examples :
|
1002
|
+
|
1003
|
+
0) dump the stdout for jid 42
|
1004
|
+
|
1005
|
+
~ > rq q stdout 42
|
1006
|
+
|
1007
|
+
|
1008
|
+
stderr, 2 :
|
1009
|
+
|
1010
|
+
dump the stderrput (if any) created by the job
|
1011
|
+
|
1012
|
+
examples :
|
1013
|
+
|
1014
|
+
0) dump the stderr for jid 42
|
1015
|
+
|
1016
|
+
~ > rq q stderr 42
|
1017
|
+
|
1018
|
+
|
1019
|
+
stdin4 :
|
1020
|
+
|
1021
|
+
show the path used for the stdin of a jid
|
1022
|
+
|
1023
|
+
examples :
|
1024
|
+
|
1025
|
+
0) show which file has job 42's stdin
|
1026
|
+
|
1027
|
+
~ > rq q stdin4 42
|
1028
|
+
|
1029
|
+
|
1030
|
+
stdout4 :
|
1031
|
+
|
1032
|
+
show the path used for the stdout of a jid
|
1033
|
+
|
1034
|
+
examples :
|
1035
|
+
|
1036
|
+
0) show which file has job 42's stdout
|
1037
|
+
|
1038
|
+
~ > rq q stdout4 42
|
1039
|
+
|
1040
|
+
|
1041
|
+
stderr4 :
|
1042
|
+
|
1043
|
+
show the path used for the stderr of a jid
|
1044
|
+
|
1045
|
+
examples :
|
1046
|
+
|
1047
|
+
0) show which file has job 42's stderr
|
1048
|
+
|
1049
|
+
~ > rq q stderr4 42
|
1050
|
+
|
1051
|
+
|
1052
|
+
recover :
|
1053
|
+
|
1054
|
+
it is possible that a hardware failure might corrupt an rq database. this
|
1055
|
+
isn't the kind of thing people like hearing, but it's true - hardware has
|
1056
|
+
errors. in these situations a database can sometimes be readable, but not
|
1057
|
+
writable, or some other combination. this has been reported only a
|
1058
|
+
handful of times, nevertheless, this command wraps sqlite recovery to get
|
1059
|
+
you rolling again, it's acceptable to perform recovery on a live rq
|
1060
|
+
database with active feeders
|
1061
|
+
|
1062
|
+
examples :
|
1063
|
+
|
1064
|
+
0) recover!
|
1065
|
+
|
1066
|
+
~ > rq q recover
|
1067
|
+
|
1068
|
+
|
1069
|
+
help, h :
|
1070
|
+
|
1071
|
+
this message
|
1072
|
+
|
1073
|
+
examples :
|
1074
|
+
|
1075
|
+
0) get this message
|
1076
|
+
|
1077
|
+
~> rq q help
|
1078
|
+
|
1079
|
+
or
|
1080
|
+
|
1081
|
+
~> rq help
|
1082
|
+
|
1083
|
+
NOTES
|
1084
|
+
|
1085
|
+
- realize that your job is going to be running on a remote host and this has
|
1086
|
+
implications. paths, for example, should be absolute, not relative.
|
1087
|
+
specifically the submitted job script must be visible from all hosts
|
1088
|
+
currently feeding from a queue as must be the input and output
|
1089
|
+
files/directories.
|
1090
|
+
|
1091
|
+
- jobs are currently run under the bash shell using the --login option.
|
1092
|
+
therefore any settings in your .bashrc will apply - specifically your PATH
|
1093
|
+
setting. you should not, however, rely on jobs running with any given
|
1094
|
+
environment.
|
1095
|
+
|
1096
|
+
- you need to consider __CAREFULLY__ what the ramifications of having
|
1097
|
+
multiple instances of your program all potentially running at the same
|
1098
|
+
time will be. for instance, it is beyond the scope of rq to ensure
|
1099
|
+
multiple instances of a given program will not overwrite each others
|
1100
|
+
output files. coordination of programs is left entirely to the user.
|
1101
|
+
|
1102
|
+
- the list of finished jobs will grow without bound unless you sometimes
|
1103
|
+
delete some (all) of them. the reason for this is that rq cannot know
|
1104
|
+
when the user has collected the exit_status of a given job, and so keeps
|
1105
|
+
this information in the queue forever until instructed to delete it. if
|
1106
|
+
you have collected the exit_status of you job(s) it is not an error to
|
1107
|
+
then delete that job from the finished list - the information is kept for
|
1108
|
+
your informational purposes only. in a production system it would be
|
1109
|
+
normal to periodically save, and then delete, all finished jobs.
|
1110
|
+
|
1111
|
+
- know that it is a VERY bad idea to spawn several dozen process all
|
1112
|
+
reading/writing huge output files to a single NFS server. use this
|
1113
|
+
paradigm instead
|
1114
|
+
|
1115
|
+
* copy/move data from global input space to local disk
|
1116
|
+
* process data
|
1117
|
+
* move data on local disk to global output space
|
1118
|
+
|
1119
|
+
this, of course, applies to any nfs processing, not just those jobs
|
1120
|
+
submitted to rq
|
1121
|
+
|
1122
|
+
the vsftp daemon is an excellent utility to have running on hosts in your
|
1123
|
+
cluster so anonymous ftp can be used to get/put data between any two
|
1124
|
+
hosts.
|
1125
|
+
|
1126
|
+
- know that nfs locking is very, very easy to break with firewalls put in
|
1127
|
+
place by overzealous system administrators. be positive not only that nfs
|
1128
|
+
locking works, but that lock recovery across server/client crash or reboot works
|
1129
|
+
as well. http://nfs.sourceforge.net/ is the place to learn about NFS. my
|
1130
|
+
experience thus far is that there are ZERO properly configured NFS
|
1131
|
+
installations in the world. please test yours. contact me for a simple
|
1132
|
+
script which can assist you. beer donations required as payment.
|
1133
|
+
|
1134
|
+
ENVIRONMENT
|
1135
|
+
|
1136
|
+
RQ_Q: set to the full path of nfs mounted queue
|
1137
|
+
|
1138
|
+
the queue argument to all commands may be omitted if, and only if, the
|
1139
|
+
environment variable 'RQ_Q' contains the full path to the q. eg.
|
1140
|
+
|
1141
|
+
~ > export RQ_Q=/full/path/to/my/q
|
1142
|
+
|
1143
|
+
this feature can save a considerable amount of typing for those weak of
|
1144
|
+
wrist.
|
1145
|
+
|
1146
|
+
a shell script like this can also be used to avoid needing to type the
|
1147
|
+
queue name each and every time
|
1148
|
+
|
1149
|
+
~ > cat my_q
|
1150
|
+
#!/bin/sh
|
1151
|
+
rq /full/path/to/my/q "$@"
|
1152
|
+
|
1153
|
+
and then all operations become, for example
|
1154
|
+
|
1155
|
+
~> my_q submit my_job
|
1156
|
+
~> my_q status
|
1157
|
+
~> my_q delete 42
|
1158
|
+
|
1159
|
+
RQ_OPTS | RQ_OPTIONS: specify extra options
|
1160
|
+
|
1161
|
+
this ENV var can be used to specify options which should always apply, for
|
1162
|
+
example
|
1163
|
+
|
1164
|
+
~ > export RQ_OPTS=--restartable
|
1165
|
+
|
1166
|
+
and shell script like this might be used to mark jobs submitted by a
|
1167
|
+
certain user and to always submit them at a negative priority
|
1168
|
+
|
1169
|
+
~ > cat username_q
|
1170
|
+
#!/bin/sh
|
1171
|
+
export RQ_OPTS="--tag=username --priority=-42"
|
1172
|
+
rq /full/path/to/my/q "$@"
|
1173
|
+
|
1174
|
+
actual command line options will always override options given this way
|
1175
|
+
|
1176
|
+
DIAGNOSTICS
|
1177
|
+
|
1178
|
+
success : $? == 0
|
1179
|
+
failure : $? != 0
|
1180
|
+
|
1181
|
+
CREDITS
|
1182
|
+
|
1183
|
+
- kim baugh : patient tester and design input
|
1184
|
+
- jeff safran : the guy can break anything
|
1185
|
+
- chris elvidge : boss who made it possible
|
1186
|
+
- trond myklebust : tons of help with nfs
|
1187
|
+
- jamis buck : for writing the sqlite bindings for ruby
|
1188
|
+
- _why : for writing yaml for ruby
|
1189
|
+
- matz : for writing ruby
|
1190
|
+
|
1191
|
+
INSTALL
|
1192
|
+
|
1193
|
+
gem install rq-ruby1.8 (see top of page)
|
1194
|
+
|
1195
|
+
TEST
|
1196
|
+
|
1197
|
+
Install rq and
|
1198
|
+
|
1199
|
+
test_rq.rb
|
1200
|
+
|
1201
|
+
AUTHOR
|
1202
|
+
|
1203
|
+
#{ AUTHOR } and #{ AUTHOR2 }
|
1204
|
+
|
1205
|
+
BUGS
|
1206
|
+
|
1207
|
+
0 < bugno && bugno <= 42
|
1208
|
+
|
1209
|
+
reports on github, or to #{ AUTHOR2 } and #{ AUTHOR }
|
1210
|
+
|
1211
|
+
SEE ALSO
|
1212
|
+
|
1213
|
+
#{ WEBSITE } - main website
|
1214
|
+
|
1215
|
+
usage
|
1216
|
+
#--}}}
|
1217
|
+
#--}}}
|
1218
|
+
end # module Usage
|
1219
|
+
#--}}}
|
1220
|
+
end # module RQ
|
1221
|
+
$__rq_usage__ = __FILE__
|
1222
|
+
end
|