rq-ruby1.8 3.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67) hide show
  1. data/Gemfile +22 -0
  2. data/Gemfile.lock +22 -0
  3. data/INSTALL +166 -0
  4. data/LICENSE +10 -0
  5. data/Makefile +6 -0
  6. data/README +1183 -0
  7. data/Rakefile +37 -0
  8. data/TODO +24 -0
  9. data/TUTORIAL +230 -0
  10. data/VERSION +1 -0
  11. data/bin/rq +902 -0
  12. data/bin/rqmailer +865 -0
  13. data/example/a.rb +7 -0
  14. data/extconf.rb +198 -0
  15. data/gemspec.rb +40 -0
  16. data/install.rb +210 -0
  17. data/lib/rq.rb +155 -0
  18. data/lib/rq/arrayfields.rb +371 -0
  19. data/lib/rq/backer.rb +31 -0
  20. data/lib/rq/configfile.rb +82 -0
  21. data/lib/rq/configurator.rb +40 -0
  22. data/lib/rq/creator.rb +54 -0
  23. data/lib/rq/cron.rb +144 -0
  24. data/lib/rq/defaultconfig.txt +5 -0
  25. data/lib/rq/deleter.rb +51 -0
  26. data/lib/rq/executor.rb +40 -0
  27. data/lib/rq/feeder.rb +527 -0
  28. data/lib/rq/ioviewer.rb +48 -0
  29. data/lib/rq/job.rb +51 -0
  30. data/lib/rq/jobqueue.rb +947 -0
  31. data/lib/rq/jobrunner.rb +110 -0
  32. data/lib/rq/jobrunnerdaemon.rb +193 -0
  33. data/lib/rq/lister.rb +47 -0
  34. data/lib/rq/locker.rb +43 -0
  35. data/lib/rq/lockfile.rb +564 -0
  36. data/lib/rq/logging.rb +124 -0
  37. data/lib/rq/mainhelper.rb +189 -0
  38. data/lib/rq/orderedautohash.rb +39 -0
  39. data/lib/rq/orderedhash.rb +240 -0
  40. data/lib/rq/qdb.rb +733 -0
  41. data/lib/rq/querier.rb +98 -0
  42. data/lib/rq/rails.rb +80 -0
  43. data/lib/rq/recoverer.rb +28 -0
  44. data/lib/rq/refresher.rb +80 -0
  45. data/lib/rq/relayer.rb +283 -0
  46. data/lib/rq/resource.rb +22 -0
  47. data/lib/rq/resourcemanager.rb +40 -0
  48. data/lib/rq/resubmitter.rb +100 -0
  49. data/lib/rq/rotater.rb +98 -0
  50. data/lib/rq/sleepcycle.rb +46 -0
  51. data/lib/rq/snapshotter.rb +40 -0
  52. data/lib/rq/sqlite.rb +286 -0
  53. data/lib/rq/statuslister.rb +48 -0
  54. data/lib/rq/submitter.rb +113 -0
  55. data/lib/rq/toucher.rb +182 -0
  56. data/lib/rq/updater.rb +94 -0
  57. data/lib/rq/usage.rb +1222 -0
  58. data/lib/rq/util.rb +304 -0
  59. data/rdoc.sh +17 -0
  60. data/rq-ruby1.8.gemspec +120 -0
  61. data/test/.gitignore +1 -0
  62. data/test/test_rq.rb +145 -0
  63. data/white_box/crontab +2 -0
  64. data/white_box/joblist +8 -0
  65. data/white_box/killrq +18 -0
  66. data/white_box/rq_killer +27 -0
  67. metadata +208 -0
@@ -0,0 +1,37 @@
1
require 'rubygems'
require 'bundler'
begin
  Bundler.setup(:default, :development)
rescue Bundler::BundlerError => e
  $stderr.puts e.message
  $stderr.puts "Run `bundle install` to install missing gems"
  exit e.status_code
end
require 'rake'

require 'jeweler'
Jeweler::Tasks.new do |gem|
  # gem is a Gem::Specification... see http://docs.rubygems.org/read/chapter/20 for more options
  gem.name = "rq-ruby1.8"
  gem.homepage = "http://github.com/pjotrp/rq"
  gem.license = "BSD"
  gem.summary = %Q{Ruby Queue scheduler}
  gem.description = %Q{Zero configuration job scheduler for computer clusters}
  gem.email = "pjotr.public01@thebird.nl"
  gem.authors = ["Pjotr Prins"]
  # Include your dependencies below. Runtime dependencies are required when using your gem,
  # and development dependencies are only needed for development (ie running rake tasks, tests, etc)
  # gem.add_runtime_dependency 'jabber4r', '> 0.1'
  # gem.add_development_dependency 'rspec', '> 1.2.3'
end
Jeweler::RubygemsDotOrgTasks.new

# FIX: the default task previously pointed at :spec, but no spec task is
# defined in this Rakefile (the suite is the :test task below), so a bare
# `rake` invocation aborted with "Don't know how to build task 'spec'".
task :default => :test

# run the shell-driven test suite in ./test
task :test do
  Dir.chdir('test') do
    sh './test_rq.rb'
  end
end
data/TODO ADDED
@@ -0,0 +1,24 @@
1
+ ---
2
+
3
+ - migrate to Ruby1.9 (problem is the posixlock module, depending on ruby1.8)
4
+ - write some rspec cases
5
+ - create gem in rubygems (for ruby1.9)
6
+
7
+ - confine/constrain mechanism
8
+ - heartbeat to slave
9
+ - ls show elapsed time
10
+ - rq q 1234 234 bug???
11
+ - full boolean resource monitoring and resource requests
12
+ - pull out some infilter/outfilter classes for all the stdin/stdout parsing
13
+ - rq relay mode (submit and track exit_status in local db)
14
+ - config file configuration for feeder
15
+ - examples dir with shell commands/scripts
16
+ - db message queue to send commands to remote nodes - ttl issues here...
17
+ - consider/test tracking feeders in queue rather than using lock on local file?
18
+ - use nodes to periodically generate stats and cache them
19
+
20
+ X rotation bug when dest directory specified
21
+ X backoff lockd recovery time
22
+ X output for start/stop (include shush)
23
+ X .rq dir for everything (log, pidfile)
24
+ X cronify
@@ -0,0 +1,230 @@
1
+ ---
2
+ === SYNOPSIS
3
+ ---
4
+
5
+ rq is a tool for instantly building simple linux clusters based on the concept
6
+ of coordinated shared access to an nfs mounted priority job queue. the idea is
7
+ a simple one: one, or more, nodes take jobs from a priority queue, run them, and
8
+ return their status and other information to the queue. the priority queue
9
+ itself is made available to all the 'feeding' nodes by virtue of it being placed
10
+ on a globally visible nfs filesystem.
11
+
12
+ -----------
13
+ | priority |
14
+ | queue |
15
+ -----------
16
+ / | \
17
+ / | \
18
+ --------nfs-------
19
+ / | \
20
+ / | \
21
+ ---------- ---------- ----------
22
+ | node_a | | node_b | | node_c |
23
+ ---------- ---------- ----------
24
+
25
+ all feeding nodes are equal, independent, and isolated. any node capable of
26
+ mounting the nfs filesystem may submit to the queue. so long as the nfs server
27
+ and one node are up - the cluster can continue processing jobs.
28
+
29
+
30
+ ---
31
+ === EXAMPLE INSTALL
32
+ ---
33
+
34
+ download rq from http://raa.ruby-lang.org/project/rq
35
+
36
+ unpack rq-X.X.X.tgz
37
+
38
+ cd ./depends/
39
+
40
+ the ./depends/packages/ directory will contain ALL packages required to install
41
+ rq including ruby[www.ruby-lang.org]
42
+
43
+ the file ./depends/install.sh installs ALL required packages for ruby queue into
44
+ an nfs mounted directory. it is a simple script - feel free to read/edit.
45
+
46
+ installed packages will include:
47
+
48
+ * ruby
49
+ * sqlite
50
+ * sqlite-ruby
51
+ * arrayfields
52
+ * posixlock
53
+ * lockfile
54
+ * rq
55
+
56
+ the install.sh procedure is recommended since
57
+
58
+ * a common nfs filesystem is required on which to store the queue anyhow
59
+
60
+ * installing all packages into this common nfs filesystem means no
61
+ software will be installed locally on any node which simplifies maintenance
62
+ and the addition of new nodes to the cluster.
63
+
64
+ * root privileges are not required for this technique
65
+
66
+ * this technique implies only two requirements for any node to join the cluster
67
+ * the nfs filesystem is visible to it
68
+ * a single crontab entry has been added
69
+
70
+ the user running ./depends/install.sh should have write permission (or sudo
71
+ write permission) to the /nfs mounted directory.
72
+
73
+ following are the steps for an install into the nfs mounted directory named
74
+ '/nfs'. absolutely NO packages or files will be installed outside this hierarchy
75
+ so simply use 'rm -rf' to un-install.
76
+
77
+ tar xvfz rq-X.X.X.tgz
78
+
79
+ cd rq-X.X.X/
80
+
81
+ cd depends/
82
+
83
+ ./install.sh /nfs
84
+
85
+ ---
86
+ === EXAMPLE SETUP
87
+ ---
88
+
89
+ = path setup
90
+
91
+ the following instructions assume your PATH has been set appropriately on all
92
+ nodes. your path should be configured to include the bin directory of the nfs
93
+ prefix used in the install. eg:
94
+
95
+ export PATH=/nfs/bin/:$PATH
96
+
97
+ if this has been done correctly the output of
98
+
99
+ which rq
100
+
101
+ should report something like
102
+
103
+ /nfs/bin/rq
104
+
105
+ /nfs being the location you just installed into
106
+
107
+
108
+ = create the queue
109
+
110
+ you must create/initialize the queue structure. this should be done one time
111
+ only from any host
112
+
113
+ rq /nfs/queue create
114
+
115
+
116
+ = configure immortal feeders on all processing nodes
117
+
118
+ add crontab entries similar to below to keep immortal feeders alive on ALL
119
+ nodes. this will not start feeders every 15 minutes - it will only attempt to
120
+ start one every 15 minutes, failing silently if one is already running.
121
+
122
+ */15 * * * * /nfs/bin/rq /nfs/queue start
123
+
124
+ obviously you will need to edit the path to rq, the path to the queue, and
125
+ possibly the log path.
126
+
127
+ this can done automatically simply by running
128
+
129
+ rq /nfs/queue cron start
130
+
131
+ essentially the crontab entry simply ensures that a feeder daemon is running on
132
+ a node at all times, even after a reboot. a majority of the time the cron entry
133
+ will do nothing - only when no daemon is running will it be able to generate a
134
+ pidfile and lock it in order to start feeding. one feeder per queue per host is
135
+ allowed by default.
136
+
137
+ if you are not quite ready to setup crontab entries on a dozen machines and want
138
+ to start a feeder from a terminal for debugging/testing purposes you might use
139
+ something like this in a separate terminal/host from the one you'll submit jobs
140
+ from:
141
+
142
+ /nfs/bin/rq /nfs/queue feed --min_sleep=2 --max_sleep=4
143
+
144
+ which will log all output to STDERR and poll at a random interval between
145
+ --min_sleep and --max_sleep. a slight diversion is required here to explain
146
+ min_sleep and max_sleep.
147
+
148
+ once a feeder finds the queue to be empty it will begin polling the queue
149
+ periodically to see if any new jobs have come in. min_sleep and max_sleep
150
+ define the lower and upper bound of the polling interval, which is actually
151
+ chosen at random to be between these two numbers for performance reasons.
152
+
153
+ _never_ would you use the polling interval shown above (min_sleep=2,
154
+ max_sleep=4) in a production environment, such a rapid interval would _only_ be
155
+ for debugging/testing.
156
+
157
+ as reference the defaults for min_sleep and max_sleep are 42 and 240
158
+ respectively, and this gives good responsiveness on a cluster of 12-20 machines.
159
+ you can determine how long a job would sit in the queue, on average, before
160
+ being noticed (assuming all nodes are not busy when the job is submitted) using
161
+ this formula
162
+
163
+ median_sleep = ((max_sleep - min_sleep) / 2) + min_sleep
164
+
165
+ wait = n_nodes / median_sleep
166
+
167
+ so, for example, if you use the default min_sleep and max_sleep for 10 feeding
168
+ nodes a job would sit in the queue for, on average, about 10 seconds before
169
+ being picked up.
170
+
171
+ it's very important to note all this talk of polling applies _only_ to nodes
172
+ which have found the queue empty and are checking back at a regular interval to
173
+ look for more work. while the queue is full of jobs each node simply works to
174
+ capacity to take jobs, runs them, and return them to the queue - so there is no
175
+ polling. in fact, it's best to consider the min_sleep and max_sleep options as
176
+ affecting how quickly the cluster as a whole will tackle a freshly loaded queue:
177
+ once the queue is loaded every node is 'too busy' to poll any longer.
178
+
179
+ one other option which should be mentioned here is the --max_feed, -f option.
180
+ this option simply determines the number of concurrent jobs rq will run at once.
181
+ the default value is 2 - therefore two jobs will be running simultaneously on
182
+ each feeding node when the queue has been loaded with jobs. this command
183
+
184
+ /nfs/bin/rq /nfs/queue feed --min_sleep=2 --max_sleep=4 --max_feed=1
185
+
186
+ might be used in a terminal (logging to stderr) to watch rq in action. limiting
187
+ the number of feeders to 1 makes the output somewhat easier to understand.
188
+ however, unless your machines cannot handle more than one of the jobs you plan
189
+ to submit it's best to keep this number > 1 for production clusters.
190
+
191
+
192
+ ---
193
+ === EXAMPLE USAGE
194
+ ---
195
+
196
+
197
+ = job submission
198
+
199
+ submit a command line job to the nfs mounted queue
200
+
201
+ rq /nfs/queue submit 'echo 42'
202
+
203
+ in this case the quoting is not needed but beware of shell expansion when
204
+ submitting jobs from the shell
205
+
206
+ submit a list of jobs from 'jobfile'. jobfile is simply a text file with one
207
+ command per line. blank lines and comments (#) are ignored.
208
+
209
+ rq /nfs/queue submit - < jobfile
210
+
211
+ = cluster status
212
+
213
+ check the status of your jobs (note that it may take cron a while to start
214
+ feeders depending on the interval used to maintain them in your crontab file)
215
+
216
+ rq /nfs/queue status
217
+
218
+ = where to go from here
219
+
220
+ for more info try
221
+
222
+ rq help
223
+
224
+
225
+ ---
226
+ === AUTHOR/ASSISTANCE/BUG REPORTS
227
+ ---
228
+
229
+ ara.t.howard@noaa.gov
230
+
data/VERSION ADDED
@@ -0,0 +1 @@
1
+ 3.4.3
data/bin/rq ADDED
@@ -0,0 +1,902 @@
1
+ #!/usr/bin/env ruby1.8
2
+
3
+ $: << '/var/lib/gems/1.8/gems/rq-ruby1.8-3.4.3/lib' # locate gem1.8 install
4
+ $: << '/usr/share/rq-ruby1.8/lib'
5
+
6
+ #
7
+ # === the rq program
8
+ #
9
+ # the rq program is the single command line interface by which all queue
10
+ # operations are affected. it always takes, as it's first argument, the name of
11
+ # the queue to be operated on. the second argument is always the mode of
12
+ # operation. the action taken and meaning of subsequent arguments depends
13
+ # directly on the mode of operation. for example the command
14
+ #
15
+ # rq queue create
16
+ #
17
+ # has the the mode _create_ and will create the queue _queue_. similarly the
18
+ # command
19
+ #
20
+ # rq queue submit my_job.sh huge_input_file.dat
21
+ #
22
+ # runs in _submit_ mode and will submit a job to _queue_.
23
+ #
24
+ # run
25
+ #
26
+ # rq --help
27
+ #
28
+ # or see README
29
+ #
30
+ # for the detailed instructions for each of the operation modes
31
+ #
32
+
33
+ begin
34
+ require 'rq'
35
+ rescue LoadError
36
+ # a guess really...
37
+ libdir = File.join(File.dirname(File.dirname(__FILE__)), 'lib')
38
+ require File.join(libdir, 'rq')
39
+ end
40
+ module RQ
41
+ #
42
+ # the Main class is responsible for parsing command line parameters and
43
+ # switches, doing some validation, initializing logging, and, ultimately,
44
+ # delegating the bulk of the work to a MainHelper based on the _mode_ given.
45
+ # the relationship between Main and MainHelper is a tight one by design - the
46
+ # primary purpose of it being to prevent the Main class from becoming 10000
47
+ # lines long. the delegators used include:
48
+ #
49
+ # * Creator
50
+ # * Submitter
51
+ # * Lister
52
+ # * StatusLister
53
+ # * Deleter
54
+ # * Updater
55
+ # * Querier
56
+ # * Executor
57
+ # * Configurator
58
+ # * Snapshotter
59
+ # * Locker
60
+ # * Backer
61
+ # * Rotater
62
+ # * Feeder
63
+ # * IOViewer
64
+ #
65
+ class Main
66
+ #--{{{
67
+ include Util
68
+ include Logging
69
+ include Usage
70
+
71
+ # an enumeration of option specifications used to parse command line
72
+ OPTSPEC =
73
+ #--{{{
74
+ [
75
+ [
76
+ '--priority=priority', '-p',
77
+ 'modes <submit, resubmit> : set the job(s) priority - lowest(0) .. highest(n) - (default 0)'
78
+ ],
79
+ [
80
+ '--tag=tag', '-t',
81
+ 'modes <submit, resubmit> : set the job(s) user data tag'
82
+ ],
83
+ [
84
+ '--runner=runner',
85
+ 'modes <submit, resubmit> : set the job(s) required runner(s)'
86
+ ],
87
+ [
88
+ '--restartable',
89
+ 'modes <submit, resubmit> : set the job(s) to be restartable on node reboot'
90
+ ],
91
+ [
92
+ '--stage',
93
+ 'modes <submit, resubmit> : set the job(s) initial state to be holding (default pending)'
94
+ ],
95
+ [
96
+ '--infile=infile', '-i',
97
+ 'modes <submit, resubmit> : infile'
98
+ ],
99
+ [
100
+ '--stdin=[stdin]', '-s',
101
+ 'modes <submit, resubmit, update> : stdin'
102
+ ],
103
+ [
104
+ '--data=data', '-d',
105
+ 'modes <submit, resubmit, update> : data'
106
+ ],
107
+
108
+ [
109
+ '--quiet', '-q',
110
+ 'modes <submit, resubmit, feed> : do not echo submitted jobs, fail silently if
111
+ another process is already feeding'
112
+ ],
113
+ [
114
+ '--daemon', '-D',
115
+ 'modes <feed> : spawn a daemon'
116
+ ],
117
+ [
118
+ '--max_feed=max_feed',
119
+ 'modes <feed> : the maximum number of concurrent jobs run'
120
+ ],
121
+ [
122
+ '--retries=retries',
123
+ 'modes <feed> : specify transaction retries'
124
+ ],
125
+ [
126
+ '--min_sleep=min_sleep',
127
+ 'modes <feed> : specify min sleep'
128
+ ],
129
+ [
130
+ '--max_sleep=max_sleep',
131
+ 'modes <feed> : specify max sleep'
132
+ ],
133
+ [
134
+ '--loops=loops', '-L',
135
+ 'modes <feed> : specify how many times to loop (default forever)'
136
+ ],
137
+ [
138
+ '--exit=exit_code_map',
139
+ 'modes <status> : specify and exit code map'
140
+ ],
141
+ [
142
+ '--fields=fields', '-f',
143
+ 'limit which fields of output to display'
144
+ ],
145
+ [
146
+ '--snapshot', '-s',
147
+ 'operate on snapshot of queue'
148
+ ],
149
+ [
150
+ '--editor=editor', '-e',
151
+ 'editor command capable of opening multiple files at once = (default ENV["RQ_EDITOR"] || "vim -R -o")'
152
+ ],
153
+ [
154
+ '--verbosity=[verbostiy]', '-v',
155
+ '0|fatal < 1|error < 2|warn < 3|info < 4|debug - (default info)'
156
+ ],
157
+ [
158
+ '--log=path','-l',
159
+ 'set log file - (default stderr)'
160
+ ],
161
+ [
162
+ '--log_age=log_age',
163
+ 'daily | weekly | monthly - what age will cause log rolling (default nil)'
164
+ ],
165
+ [
166
+ '--log_size=log_size',
167
+ 'size in bytes - what size will cause log rolling (default nil)'
168
+ ],
169
+ [
170
+ '--dot_rq_dir=[dot_rq_dir]',
171
+ 'base dir for log/pidfile storage (default ~/.rq/full/path/to/queue)'
172
+ ],
173
+ # [
174
+ # '--config=path',
175
+ # 'valid path - specify config file (default nil)'
176
+ # ],
177
+ # [
178
+ # '--template=[path]',
179
+ # 'valid path - generate a template config file in path (default stdout)'
180
+ # ],
181
+ [
182
+ '--help', '-h',
183
+ 'this message'
184
+ ],
185
+ [
186
+ '--version',
187
+ 'show version number'
188
+ ],
189
+ ]
190
+ #--}}}
191
+
192
+ # the default config file searched for has this basename
193
+ CONFIG_DEFAULT_PATH = 'rq.conf'
194
+
195
+ # config files are searched for using this list of locations
196
+ CONFIG_SEARCH_PATH = %w( . ~ /dmsp/reference/etc /usr/local/etc /usr/etc /etc )
197
+
198
+ # the queue can be specified in the environment
199
+ Q = ENV['RQ_Q'] || ENV['RQ_QUEUE']
200
+
201
+ attr :logger
202
+ attr :argv
203
+ attr :env
204
+ attr :program
205
+ attr :stdin
206
+ attr :job_stdin
207
+ attr :data
208
+ attr :cmd
209
+ attr :options
210
+ attr :qpath
211
+ attr :mode
212
+ attr :q
213
+ attr :daemon
214
+ attr :quiet
215
+ attr :loops
216
+ attr :fields
217
+ attr :dot_rq_dir
218
+
219
+ alias_method 'stdin?', 'stdin'
220
+ alias_method 'job_stdin?', 'job_stdin'
221
+ alias_method 'data?', 'data'
222
+ alias_method 'quiet?', 'quiet'
223
+
224
+ # given a command line and environment run the rq program
225
+ def initialize argv = ARGV, env = ENV
226
+ #--{{{
227
+ begin
228
+ @logger = Logger::new STDERR
229
+ @argv = Util::mcp(argv.to_a)
230
+ @env = Util::mcp(env.to_hash)
231
+ @program = $0
232
+ @cmd = ([File::expand_path($0)] + ARGV).join(' ')
233
+ @stdin = parse_stdin
234
+
235
+ parse_options
236
+
237
+ if(@options.has_key?('name'))
238
+ $0 = ([@options['name']] + ARGV).join(' ')
239
+ end
240
+
241
+ if(@options.has_key?('help') or @argv.include?('help'))
242
+ usage('port' => STDOUT, 'long' => true)
243
+ exit EXIT_SUCCESS
244
+ end
245
+
246
+ if(@options.has_key?('template') or (idx = @argv.index('template')))
247
+ gen_template(@options['template'] || @argv[idx + 1])
248
+ exit EXIT_SUCCESS
249
+ end
250
+
251
+ if @options.has_key?('version')
252
+ puts RQ::VERSION
253
+ exit EXIT_SUCCESS
254
+ end
255
+
256
+ if(@options.has_key?('stdin'))
257
+ @options['stdin'] ||= '-'
258
+ @job_stdin = @options['stdin']
259
+ end
260
+
261
+ if(@options.has_key?('quiet'))
262
+ @quiet = true
263
+ end
264
+
265
+ if(@options.has_key?('fields'))
266
+ @fields = @options['fields'].split(%r/,/).map{|f| f.strip}
267
+ @fields.uniq!
268
+ end
269
+
270
+ if(@options.has_key?('loops'))
271
+ @loops = @options['loops']
272
+ @loops = Integer @loops if @loops
273
+ end
274
+
275
+ parse_argv
276
+
277
+ setup_dot_rq_dir
278
+
279
+ status = run
280
+
281
+ case status
282
+ when Integer
283
+ exit status
284
+ else
285
+ exit(status ? EXIT_SUCCESS : EXIT_FAILURE)
286
+ end
287
+ rescue => e
288
+ unless SystemExit === e
289
+ logerr e
290
+ exit EXIT_FAILURE
291
+ else
292
+ exit e.status
293
+ end
294
+ end
295
+ #--}}}
296
+ end
297
# pull the queue path and operation mode off the front of the command
# line; the queue path may instead come from the environment (constant Q,
# i.e. RQ_Q / RQ_QUEUE), in which case nothing is consumed for it
def parse_argv
  @qpath = Q
  @qpath = @argv.shift unless @qpath
  @mode = @argv.shift
end
304
# decide where per-queue logs/pidfiles live: honor --dot_rq_dir when
# given, otherwise default to ~/.rq/<full queue path>, and make sure the
# directory exists on disk
def setup_dot_rq_dir
  @dot_rq_dir = @options['dot_rq_dir'] if @options.has_key? 'dot_rq_dir'
  unless @dot_rq_dir
    home = ENV['HOME'] || File::expand_path('~') rescue abort("ENV['HOME'] is unset!")
    @dot_rq_dir = File.join(home, '.rq', @qpath)
  end
  FileUtils.mkdir_p @dot_rq_dir
end
317
+ # select a MainHelper based on mode and delegate to it
318
+ def run
319
+ #--{{{
320
+ @qpath = Util::realpath @qpath
321
+
322
+ if @mode.nil? or @mode.strip.empty?
323
+ usage 'port' => STDERR, 'long' => false
324
+ exit EXIT_FAILURE
325
+ end
326
+
327
+ shortcuts = {
328
+ 'c' => 'create',
329
+ 's' => 'submit',
330
+ 'r' => 'resubmit',
331
+ 're' => 'resubmit',
332
+ 'l' => 'list',
333
+ 'ls' => 'list',
334
+ 't' => 'status',
335
+ 'd' => 'delete',
336
+ 'rm' => 'delete',
337
+ 'u' => 'update',
338
+ 'q' => 'query',
339
+ 'e' => 'execute',
340
+ 'C' => 'configure',
341
+ 'S' => 'snapshot',
342
+ 'L' => 'lock',
343
+ 'B' => 'backup',
344
+ 'R' => 'rotate',
345
+ 'h' => 'help',
346
+ 'H' => 'help',
347
+ 'f' => 'feed',
348
+ 'io' => 'ioview',
349
+ '0' => 'stdin',
350
+ '1' => 'stdout',
351
+ '2' => 'stderr',
352
+ 'to' => 'touch',
353
+ 'ta' => 'tail',
354
+ 'cron' => 'cron',
355
+ }
356
+
357
+ if((longmode = shortcuts[@mode]))
358
+ @mode = longmode
359
+ end
360
+
361
+ begin
362
+ case @mode
363
+ when 'create'
364
+ create
365
+ when 'submit'
366
+ submit
367
+ when 'resubmit'
368
+ resubmit
369
+ when 'list'
370
+ list
371
+ when 'status'
372
+ status
373
+ when 'delete'
374
+ delete
375
+ when 'update'
376
+ update
377
+ when 'query'
378
+ query
379
+ when 'execute'
380
+ execute
381
+ when 'configure'
382
+ configure
383
+ when 'snapshot'
384
+ snapshot
385
+ when 'lock'
386
+ lock
387
+ when 'backup'
388
+ backup
389
+ when 'rotate'
390
+ rotate
391
+ when 'help'
392
+ usage 'port' => STDOUT, 'long' => true
393
+ exit EXIT_SUCCESS
394
+ when 'feed'
395
+ feed
396
+ when 'start'
397
+ start
398
+ when 'shutdown'
399
+ shutdown
400
+ when 'stop'
401
+ stop
402
+ when 'restart'
403
+ restart
404
+ when 'pid'
405
+ pid
406
+ when 'feeder'
407
+ feeder
408
+ when 'recover'
409
+ recover
410
+ when 'ioview'
411
+ ioview
412
+ when 'stdin'
413
+ dump_stdin
414
+ when 'stdout'
415
+ dump_stdout
416
+ when 'stderr'
417
+ dump_stderr
418
+ when 'stdin4'
419
+ stdin4
420
+ when 'stdout4'
421
+ stdout4
422
+ when 'stderr4'
423
+ stderr4
424
+ when 'touch'
425
+ touch
426
+ when 'tail'
427
+ tail
428
+ when 'cron'
429
+ cron
430
+ when 'crontab'
431
+ crontab
432
+ else
433
+ raise "invalid mode <#{ @mode }>"
434
+ end
435
+ self
436
+ rescue Errno::EPIPE => e
437
+ raise if STDOUT.tty?
438
+ end
439
+
440
+ EXIT_SUCCESS
441
+ #--}}}
442
+ end
443
+ # delegated to a Creator
444
+ def create
445
+ #--{{{
446
+ init_logging
447
+ creator = Creator::new self
448
+ creator.create
449
+ #--}}}
450
+ end
451
+ # delegated to a Submitter
452
+ def submit
453
+ #--{{{
454
+ init_logging
455
+ submitter = Submitter::new self
456
+ submitter.submit
457
+ #--}}}
458
+ end
459
+ # delegated to a ReSubmitter
460
+ def resubmit
461
+ #--{{{
462
+ init_logging
463
+ resubmitter = ReSubmitter::new self
464
+ resubmitter.resubmit
465
+ #--}}}
466
+ end
467
+ # delegated to a Lister
468
+ def list
469
+ #--{{{
470
+ init_logging
471
+ @options['snapshot'] = true
472
+ lister = Lister::new self
473
+ lister.list
474
+ #--}}}
475
+ end
476
+ # delegated to a StatusLister
477
+ def status
478
+ #--{{{
479
+ init_logging
480
+ @options['snapshot'] = true
481
+ statuslister = StatusLister::new self
482
+ statuslister.statuslist
483
+ #--}}}
484
+ end
485
+ # delegated to a Deleter
486
+ def delete
487
+ #--{{{
488
+ init_logging
489
+ deleter = Deleter::new self
490
+ deleter.delete
491
+ #--}}}
492
+ end
493
+ # delegated to a Updater
494
+ def update
495
+ #--{{{
496
+ init_logging
497
+ updater = Updater::new self
498
+ updater.update
499
+ #--}}}
500
+ end
501
+ # delegated to a Querier
502
+ def query
503
+ #--{{{
504
+ init_logging
505
+ querier = Querier::new self
506
+ querier.query
507
+ #--}}}
508
+ end
509
+ # delegated to a Executor
510
+ def execute
511
+ #--{{{
512
+ init_logging
513
+ executor = Executor::new self
514
+ executor.execute
515
+ #--}}}
516
+ end
517
+ # delegated to a Configurator
518
+ def configure
519
+ #--{{{
520
+ init_logging
521
+ configurator = Configurator::new self
522
+ configurator.configure
523
+ #--}}}
524
+ end
525
+ # delegated to a Snapshotter
526
+ def snapshot
527
+ #--{{{
528
+ init_logging
529
+ snapshotter = Snapshotter::new self
530
+ snapshotter.snapshot
531
+ #--}}}
532
+ end
533
+ # delegated to a Locker
534
+ def lock
535
+ #--{{{
536
+ init_logging
537
+ locker = Locker::new self
538
+ locker.lock
539
+ #--}}}
540
+ end
541
+ # delegated to a Backer
542
+ def backup
543
+ #--{{{
544
+ init_logging
545
+ backer = Backer::new self
546
+ backer.backup
547
+ #--}}}
548
+ end
549
+ # delegated to a Rotater
550
+ def rotate
551
+ #--{{{
552
+ init_logging
553
+ rotater = Rotater::new self
554
+ rotater.rotate
555
+ #--}}}
556
+ end
557
+ # delegated to a Feeder
558
+ def feed
559
+ #--{{{
560
+ feeder = Feeder::new self
561
+ feeder.feed
562
+ #--}}}
563
+ end
564
+ # quietly start a daemon process
565
+ def start
566
+ #--{{{
567
+ unless exists
568
+ @options['daemon'] = true
569
+ @options['quiet'] = true
570
+ @options['log'] ||= File.join(@dot_rq_dir, 'log')
571
+ feeder = Feeder::new self
572
+ feeder.feed
573
+ end
574
+ #--}}}
575
+ end
576
+ # clean stop
577
+ def shutdown
578
+ #--{{{
579
+ pid = (exists and signal_feeder('TERM'))
580
+ puts "pid <#{ pid }> signaled to stop a.s.a.p" if pid
581
+ exit(Integer === pid ? 0 : 1)
582
+ #--}}}
583
+ end
584
+ # hard stop
585
+ def stop
586
+ #--{{{
587
+ pid = (exists and signal_feeder('KILL'))
588
+ puts "pid <#{ pid }> signaled to stop now" if pid
589
+ exit(Integer === pid ? 0 : 1)
590
+ #--}}}
591
+ end
592
+ # sighup based restart
593
+ def restart
594
+ #--{{{
595
+ pid = (exists and signal_feeder('HUP'))
596
+ puts "pid <#{ pid }> signaled to restart" if pid
597
+ exit(Integer === pid ? 0 : 1)
598
+ #--}}}
599
+ end
600
+ # is a feeder running?
601
+ def feeder
602
+ #--{{{
603
+ arg = @argv.shift
604
+ case arg
605
+ when /pid/
606
+ pid
607
+ else
608
+ puts "---\nfeeder : #{ exists ? true : false }"
609
+ end
610
+ #--}}}
611
+ end
612
+ # pid of any running feeder
613
+ def pid
614
+ #--{{{
615
+ puts "---\npid : #{ exists || '~' }"
616
+ #--}}}
617
+ end
618
+ # attempt sqlite db recovery
619
+ def recover
620
+ #--{{{
621
+ init_logging
622
+ recoverer = Recoverer::new self
623
+ recoverer.recover
624
+ #--}}}
625
+ end
626
+ # spawn external process to view stdin/stdout/stderr of jids
627
+ def ioview
628
+ #--{{{
629
+ init_logging
630
+ ioviewer = IOViewer::new self
631
+ ioviewer.ioview
632
+ #--}}}
633
+ end
634
# write the recorded stdin of each jid listed on the command line to stdout
def dump_stdin
  dump_ios('stdin', jids4(@argv))
end

# write the recorded stdout of each jid listed on the command line to stdout
def dump_stdout
  dump_ios('stdout', jids4(@argv))
end

# write the recorded stderr of each jid listed on the command line to stdout
def dump_stderr
  dump_ios('stderr', jids4(@argv))
end
652
# with an argument, return the queue-relative path of that jid's stdin
# file; with no argument, print the stdin path of every jid taken from
# the command line (one per line)
def stdin4 jids = nil
  return File.join(@qpath, 'stdin', jids.to_s) if jids
  jids4(@argv).flatten.each do |jid|
    puts File.join(@qpath, 'stdin', jid.to_s)
  end
end
668
# with an argument, return the queue-relative path of that jid's stdout
# file; with no argument, print the stdout path of every jid taken from
# the command line (one per line)
def stdout4 jids = nil
  return File.join(@qpath, 'stdout', jids.to_s) if jids
  jids4(@argv).flatten.each do |jid|
    puts File.join(@qpath, 'stdout', jid.to_s)
  end
end
684
# with an argument, return the queue-relative path of that jid's stderr
# file; with no argument, print the stderr path of every jid taken from
# the command line (one per line)
def stderr4 jids = nil
  return File.join(@qpath, 'stderr', jids.to_s) if jids
  jids4(@argv).flatten.each do |jid|
    puts File.join(@qpath, 'stderr', jid.to_s)
  end
end
700
+ # delegated to a Toucher
701
+ def touch
702
+ #--{{{
703
+ init_logging
704
+ toucher = Toucher::new self
705
+ toucher.touch
706
+ #--}}}
707
+ end
708
+ # spawn external process to tail stdin/stdout/stderr of jids
709
+ def tail
710
+ #--{{{
711
+ @options['editor'] = 'tail -F'
712
+ init_logging
713
+ ioviewer = IOViewer::new self
714
+ ioviewer.ioview rescue nil
715
+ #--}}}
716
+ end
717
+ # add/delete crontab entry
718
+ def cron
719
+ #--{{{
720
+ init_logging
721
+ cron = Cron::new self
722
+ cron.cron
723
+ #--}}}
724
+ end
725
+ def crontab
726
+ #--{{{
727
+ argv.unshift 'tab'
728
+ init_logging
729
+ cron = Cron::new self
730
+ cron.cron
731
+ #--}}}
732
+ end
733
+
734
# cat the io file named by +which+ ('stdin'/'stdout'/'stderr') for every
# jid in +jids+, quietly skipping jobs whose io file cannot be read
def dump_ios which, jids
  jids.each do |jid|
    path = send("#{ which }4", jid)
    begin
      cat path
    rescue
      next
    end
  end
end
746
# stream the contents of +path+ to stdout: shell out to cat(1) first,
# falling back to a pure-ruby line-by-line copy when that fails
def cat path
  return true if system("cat #{ path } 2>/dev/null")
  open(path) do |io|
    io.each { |line| print line }
  end
end
751
# normalize +list+ into a flat array of integer jids; when stdin was
# given on the command line, also parse jobs from it (via
# MainHelper#loadio) and append their jids, aborting on any bad value
def jids4 *list
  jids = list.flatten.map { |item| Integer item }
  if @stdin
    helper = MainHelper.new self
    jobs = []
    helper.loadio @stdin, 'stdin', jobs
    jobs.each { |job| jids << job['jid'] }
    jids.map! { |jid| Integer(jid) rescue abort("bad jid <#{ jid.inspect }>") }
  end
  jids
end
765
+
766
# probe for a running feeder by sending it signal 0; returns its pid when
# one is alive, nil otherwise.
# NOTE(review): signal_feeder's bare rescue below already swallows
# Errno::ESRCH and returns nil, so this rescue looks unreachable - confirm.
def exists
  signal_feeder 0
rescue Errno::ESRCH
  false
end

# send +sig+ to the feeder whose pid is recorded in this queue's pidfile;
# returns the pid on success, nil when there is no pidfile or no process
def signal_feeder sig
  pidfilepath = Feeder::new(self).gen_pidfilepath
  pid = Integer(IO::read(pidfilepath)) rescue nil
  begin
    Process::kill sig, pid
    pid
  rescue
    nil
  end
end
788
# consume a bare '-' from the command line and return STDIN, unless the
# '-' appears after a '--' terminator (then it is a literal argument and
# left alone). returns nil when stdin was not requested. note that every
# '-' entry is deleted, matching Array#delete semantics.
def parse_stdin
  terminator = @argv.index '--'
  dash = @argv.index '-'
  return nil unless dash
  return nil if terminator && dash > terminator
  @argv.delete '-'
  STDIN
end
805
# build an OptionParser from OPTSPEC and parse, in order, any switches
# found in the RQ_OPTS/RQ_OPTIONS environment and then the command line.
# option values are stored as strings keyed by the bare long-option name;
# a switch given without a value is recorded with a nil value.
def parse_options
  @op = OptionParser.new
  @options = {}
  OPTSPEC.each do |spec|
    key = spec.first.gsub(%r/(?:--)|(?:=.*$)|(?:\s+)/o, '')
    @op.def_option(*spec) do |value|
      value = value.to_s
      @options[key] = value.empty? ? nil : value
    end
  end

  env_opts = ENV['RQ_OPTS'] || ENV['RQ_OPTIONS']
  if env_opts
    require 'shellwords'
    @op.parse! Shellwords.shellwords(env_opts)
  end

  @op.parse! @argv

  @options
end
826
+ # initialize logging object - all classes then use this object
827
+ def init_logging
828
+ #--{{{
829
+ log, log_age, log_size, verbosity =
830
+ @options.values_at 'log', 'log_age', 'log_size', 'verbosity'
831
+ log_age = atoi log_age rescue nil
832
+ log_size = atoi log_size rescue nil
833
+ $logger = @logger = Logger::new(log || STDERR, log_age, log_size)
834
+ #
835
+ # hack to fix Logger sync bug
836
+ #
837
+ @logger.class.instance_eval do
838
+ attr :logdev unless @logger.respond_to?(:logdev)
839
+ end
840
+
841
+ @logdev = @logger.logdev.dev
842
+ @logdev.sync = true
843
+ level = nil
844
+ verbosity ||=
845
+ if @options.has_key? 'verbosity'
846
+ 'debug'
847
+ else
848
+ 'info'
849
+ end
850
+ verbosity =
851
+ case verbosity
852
+ when /^\s*(?:4|d|debug)\s*$/io
853
+ level = 'Logging::DEBUG'
854
+ 4
855
+ when /^\s*(?:3|i|info)\s*$/io
856
+ level = 'Logging::INFO'
857
+ 3
858
+ when /^\s*(?:2|w|warn)\s*$/io
859
+ level = 'Logging::WARN'
860
+ 2
861
+ when /^\s*(?:1|e|error)\s*$/io
862
+ level = 'Logging::ERROR'
863
+ 1
864
+ when /^\s*(?:0|f|fatal)\s*$/io
865
+ level = 'Logging::FATAL'
866
+ 0
867
+ else
868
+ abort "illegal verbosity setting <#{ verbosity }>"
869
+ end
870
+ @logger.level = 2 - ((verbosity % 5) - 2)
871
+ #debug {"logging level <#{ level }>"}
872
+ @logger
873
+ #--}}}
874
+ end
875
+ # initialize configuration file - not currently utilized
876
+ def init_config
877
+ #--{{{
878
+ @config =
879
+ if @options['config']
880
+ ConfigFile::new(@options['config'])
881
+ else
882
+ ConfigFile::any CONFIG_DEFAULT_PATH, CONFIG_SEARCH_PATH
883
+ end
884
+ debug { "config.path <#{ @config.path }>" }
885
+ @config
886
+ #--}}}
887
+ end
888
+ # generate a template/sample config file which can then be edited
889
+ def gen_template template
890
+ #--{{{
891
+ ConfigFile::gen_template(template)
892
+ self
893
+ #--}}}
894
+ end
895
+ #--}}}
896
+ end
897
+ end
898
+
899
+ #
900
+ # run main program unless included as a library (testing purposes)
901
+ #
902
+ RQ::Main::new ARGV, ENV