torque_rm 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.document +5 -0
- data/.rspec +1 -0
- data/Gemfile +25 -0
- data/LICENSE.txt +20 -0
- data/README.html +230 -0
- data/README.md +231 -0
- data/Rakefile +52 -0
- data/VERSION +1 -0
- data/bin/torque_rm_rest +147 -0
- data/features/step_definitions/common_steps.rb +21 -0
- data/features/step_definitions/qsub_steps.rb +47 -0
- data/features/step_definitions/torque_rm_steps.rb +0 -0
- data/features/support/env.rb +13 -0
- data/features/torque_rm.feature +9 -0
- data/features/torque_rm/common.feature +22 -0
- data/features/torque_rm/qsub.feature +52 -0
- data/lib/torque_rm.rb +11 -0
- data/lib/torque_rm/common.rb +74 -0
- data/lib/torque_rm/qdel.rb +13 -0
- data/lib/torque_rm/qstat.rb +308 -0
- data/lib/torque_rm/qsub.rb +342 -0
- data/spec/spec_helper.rb +12 -0
- data/spec/torque_rm_spec.rb +7 -0
- data/torque_rm.gemspec +106 -0
- data/web/helpers/qstat.rb +19 -0
- data/web/views/qstat.haml +19 -0
- data/web/views/qstat_job.haml +14 -0
- metadata +271 -0
@@ -0,0 +1,308 @@
|
|
1
|
+
require 'date'
|
2
|
+
require 'json'
|
3
|
+
require 'json/add/core'
|
4
|
+
module TORQUE
|
5
|
+
class Qstat
|
6
|
+
FIELDS = %w(job_id job_name job_owner resources_used_cput resources_used_mem resources_used_vmem
|
7
|
+
resources_used_walltime job_state substate queue server checkpoint ctime error_path exec_host
|
8
|
+
exec_port hold_types join_path keep_files mail_points mail_users mtime output_path
|
9
|
+
priority qtime rerunable resource_list session_id shell_path_list variable_list
|
10
|
+
euser egroup hashname queue_rank queue_type comment etime
|
11
|
+
exit_status submit_args walltime_remaining start_time start_count fault_tolerant comp_time job_radix total_runtime
|
12
|
+
submit_host nppcu)
|
13
|
+
Job = Struct.new(:job_id, :job_name, :job_owner, :resources_used_cput, :resources_used_mem, :resources_used_vmem,
|
14
|
+
:resources_used_walltime, :job_state, :substate, :queue, :server, :checkpoint, :ctime, :error_path, :exec_host,
|
15
|
+
:exec_port, :hold_types, :join_path, :keep_files, :mail_points, :mail_users, :mtime, :output_path,
|
16
|
+
:priority, :qtime, :rerunable, :resource_list, :session_id,
|
17
|
+
:shell_path_list, :variable_list, :euser, :egroup, :hashname, :queue_rank, :queue_type, :comment,
|
18
|
+
:etime, :exit_status, :submit_args, :walltime_remaining, :start_time,
|
19
|
+
:start_count, :fault_tolerant, :comp_time, :job_radix, :total_runtime, :submit_host, :nppcu) do
|
20
|
+
#add here your custom method for Qstat::Job
|
21
|
+
|
22
|
+
alias :id :job_id
|
23
|
+
|
24
|
+
def is_runnig?
|
25
|
+
job_state == 'R'
|
26
|
+
end
|
27
|
+
alias running? is_runnig?
|
28
|
+
|
29
|
+
def is_completed?
|
30
|
+
job_state == 'C'
|
31
|
+
end
|
32
|
+
alias completed? is_completed?
|
33
|
+
|
34
|
+
def is_exited?
|
35
|
+
job_state == 'E'
|
36
|
+
end
|
37
|
+
alias exited? is_exited?
|
38
|
+
|
39
|
+
def is_queued?
|
40
|
+
job_state == 'Q'
|
41
|
+
end
|
42
|
+
alias queued? is_queued?
|
43
|
+
alias is_in_queue? is_queued?
|
44
|
+
|
45
|
+
def time
|
46
|
+
return (resources_used_walltime) ? resources_used_walltime : "-"
|
47
|
+
end
|
48
|
+
|
49
|
+
def memory
|
50
|
+
resources_used_mem ? (resources_used_mem.split("kb").first.to_f/1000).round(1) : "0"
|
51
|
+
end
|
52
|
+
|
53
|
+
def node
|
54
|
+
exec_host ? exec_host.split("+").map {|n| n.split(".").first}.uniq.join(",") : "-"
|
55
|
+
end
|
56
|
+
|
57
|
+
def procs
|
58
|
+
resource_list.each do |r|
|
59
|
+
resource = r[:resource]
|
60
|
+
if resource[:name] == "ncpus"
|
61
|
+
return resource[:value]
|
62
|
+
elsif resource[:name] == "nodes"
|
63
|
+
return resource[:value].split("ppn=")[-1]
|
64
|
+
end
|
65
|
+
end
|
66
|
+
return "-"
|
67
|
+
end
|
68
|
+
|
69
|
+
def fields
|
70
|
+
FIELDS + %w( is_runnig? is_queued? is_exited? is_completed? time memory node )
|
71
|
+
end
|
72
|
+
|
73
|
+
def self.fields
|
74
|
+
FIELDS + %w( is_runnig? is_queued? is_exited? is_completed? time memory node )
|
75
|
+
end
|
76
|
+
|
77
|
+
#alias to_hash to_h
|
78
|
+
|
79
|
+
def to_map
|
80
|
+
map = Hash.new
|
81
|
+
self.members.each { |m| map[m] = self[m] }
|
82
|
+
map
|
83
|
+
end
|
84
|
+
|
85
|
+
def to_json(*a)
|
86
|
+
to_map.to_json(*a)
|
87
|
+
end
|
88
|
+
|
89
|
+
def self.json_load(json)
|
90
|
+
JSON.load(json)
|
91
|
+
end
|
92
|
+
|
93
|
+
def rm
|
94
|
+
Qdel.rm(job_id)
|
95
|
+
end
|
96
|
+
alias :del :rm
|
97
|
+
alias :delete :rm
|
98
|
+
|
99
|
+
end # Job
|
100
|
+
|
101
|
+
# Parslet grammar for the plain-text output of `qstat -f`.
# The root (:qstat) is a sequence of "Job Id: ..." records, each followed by
# an optional run of "name = value" attribute lines.  Values are tagged with
# :string/:integer/:datetime/:boolean so Trans (below) can coerce them.
class Parser < Parslet::Parser
  # --- lexical primitives ---
  rule(:newline) { match('\n').repeat(1) }
  rule(:space) { match('\s').repeat }
  rule(:space?) { space.maybe }
  rule(:tab) { match('\t').repeat(1) }
  rule(:newline?) { newline.maybe }
  rule(:value) { match('[a-zA-Z0-9\.\_\@\/\+ \,\-:=]').repeat }
  rule(:qstat) { job_id.repeat }
  rule(:resource_list_name) { str("Resource_List") >> str(".") >> (match('[a-zA-Z]').repeat(1).as(:string)).as(:name) }
  rule(:split_assignment) { (space >> str("=") >> space).repeat(1) }
  root(:qstat)

  # Variable_List values may continue over tab-indented lines.
  rule(:variable_item){ tab >> value >> newline }
  rule(:variable_items) { variable_item.repeat }
  rule(:variable_list_items) { value >> newline >> variable_items.maybe}

  # One record: the "Job Id:" header followed by its attribute lines.
  rule(:job_id) {(str("Job Id:") >> space >> value.as(:string)).as(:job_id) >> newline? >> fields.maybe >> newline? }
  rule(:job_name) {(space >> str("Job_Name = ") >> value.as(:string) >> newline).as(:job_name)}
  rule(:job_owner) {(space >> str("Job_Owner = ") >> value.as(:string) >> newline).as(:job_owner)}
  rule(:resources_used_cput) {(space >> str("resources_used.cput = ") >> value.as(:string) >> newline).as(:resources_used_cput)}
  rule(:resources_used_mem) {(space >> str("resources_used.mem = ") >> value.as(:string) >> newline).as(:resources_used_mem)}
  rule(:resources_used_vmem) {(space >> str("resources_used.vmem = ") >> value.as(:string) >> newline).as(:resources_used_vmem)}
  rule(:resources_used_walltime) {(space >> str("resources_used.walltime = ") >> value.as(:string) >> newline).as(:resources_used_walltime)}
  rule(:job_state) {(space >> str("job_state = ") >> value.as(:string) >> newline).as(:job_state)}
  rule(:queue) {(space >> str("queue = ") >> value.as(:string) >> newline).as(:queue)}
  rule(:server) {(space >> str("server = ") >> value.as(:string) >> newline).as(:server)}
  rule(:checkpoint) {(space >> str("Checkpoint = ") >> value.as(:string) >> newline).as(:checkpoint)}
  rule(:ctime) {(space >> str("ctime = ") >> value.as(:datetime) >> newline).as(:ctime)}
  rule(:error_path) {(space >> str("Error_Path = ") >> value.as(:string) >> newline).as(:error_path)}
  rule(:exec_host) {(space >> str("exec_host = ") >> value.as(:string) >> newline).as(:exec_host)}
  rule(:exec_port) {(space >> str("exec_port = ") >> value.as(:string) >> newline).as(:exec_port)}
  rule(:hold_types) {(space >> str("Hold_Types = ") >> value.as(:string) >> newline).as(:hold_types)}
  rule(:join_path) {(space >> str("Join_Path = ") >> value.as(:string) >> newline).as(:join_path)}
  rule(:keep_files) {(space >> str("Keep_Files = ") >> value.as(:string) >> newline).as(:keep_files)}
  rule(:mail_points) {(space >> str("Mail_Points = ") >> value.as(:string) >> newline).as(:mail_points)}
  rule(:mail_users) {(space >> str("Mail_Users = ") >> value.as(:string) >> newline).as(:mail_users)}
  rule(:mail_users?) {mail_users.maybe }
  rule(:mtime) {(space >> str("mtime = ") >> value.as(:datetime) >> newline).as(:mtime)}
  rule(:output_path) {(space >> str("Output_Path = ") >> value.as(:string) >> newline).as(:output_path)}
  rule(:priority) {(space >> str("Priority = ") >> value.as(:integer) >> newline).as(:priority)}
  rule(:qtime) {(space >> str("qtime = ") >> value.as(:datetime) >> newline).as(:qtime)}
  rule(:rerunable) {(space >> str("Rerunable = ") >> value.as(:boolean) >> newline).as(:rerunable)}

  # "Resource_List.<name> = <value>" lines, collected into :resource_list.
  rule(:resource) {(space >> resource_list_name >> str(" = ") >> (value.as(:string)).as(:value) >> newline).as(:resource)}
  rule(:resource_list) { resource.repeat.as(:resource_list)}

  rule(:session_id) {(space >> str("session_id = ") >> value.as(:integer) >> newline?).as(:session_id)}
  rule(:substate) {(space >> str("substate = ") >> value.as(:integer) >> newline?).as(:substate)} # Torque 2.4.16
  rule(:shell_path_list) {(space >> str("Shell_Path_List = ") >> value.as(:string) >> newline?).as(:shell_path_list)}
  rule(:variable_list) {(space >> str("Variable_List = ") >> variable_list_items.as(:string) >> newline?).as(:variable_list)}
  rule(:euser) {(space >> str("euser = ") >> value.as(:string) >> newline?).as(:euser)} # Torque 2.4.16
  rule(:egroup) {(space >> str("egroup = ") >> value.as(:string) >> newline?).as(:egroup)} # Torque 2.4.16
  rule(:hashname) {(space >> str("hashname = ") >> value.as(:string) >> newline?).as(:hashname)} # Torque 2.4.16
  rule(:queue_rank) {(space >> str("queue_rank = ") >> value.as(:string) >> newline?).as(:queue_rank)} # Torque 2.4.16
  rule(:queue_type) {(space >> str("queue_type = ") >> value.as(:string) >> newline?).as(:queue_type)} # Torque 2.4.16
  rule(:comment) {(space >> str("comment = ") >> value.as(:string) >> newline?).as(:comment)} # Torque 2.4.16
  rule(:etime) {(space >> str("etime = ") >> value.as(:datetime) >> newline?).as(:etime)}
  rule(:exit_status) {(space >> str("exit_status = ") >> value.as(:string) >> newline?).as(:exit_status)}
  rule(:submit_args) {(space >> str("submit_args = ") >> value.as(:string) >> newline?).as(:submit_args)}
  rule(:start_time) {(space >> str("start_time = ") >> value.as(:datetime) >> newline?).as(:start_time)}
  rule(:walltime_remaining) {(space >> str("Walltime.Remaining = ") >> value.as(:integer) >> newline?).as(:walltime_remaining)} # Torque 2.4.16
  rule(:start_count) {(space >> str("start_count = ") >> value.as(:integer) >> newline?).as(:start_count)}
  rule(:fault_tolerant) {(space >> str("fault_tolerant = ") >> value.as(:boolean) >> newline?).as(:fault_tolerant)}
  rule(:comp_time) {(space >> str("comp_time = ") >> value.as(:datetime) >> newline?).as(:comp_time)}
  rule(:job_radix) {(space >> str("job_radix = ") >> value.as(:string) >> newline?).as(:job_radix)}
  rule(:total_runtime) {(space >> str("total_runtime = ") >> value.as(:string) >> newline?).as(:total_runtime)}
  rule(:submit_host) {(space >> str("submit_host = ") >> value.as(:string) >> newline?).as(:submit_host)}
  rule(:nppcu) {(space >> str("nppcu = ") >> value.as(:integer) >> newline?).as(:nppcu)} #Torque 4.2.5 / Maui 3.3.1

  # a lot of maybe, maybe everything: every attribute is optional because
  # different Torque versions emit different subsets, but the relative order
  # below must match qstat's output order.
  rule(:fields) { job_name.maybe >> job_owner.maybe >> resources_used_cput.maybe >> resources_used_mem.maybe >>
    resources_used_vmem.maybe >> resources_used_walltime.maybe >> job_state.maybe >> queue.maybe >> server.maybe >>
    checkpoint.maybe >> ctime.maybe >> error_path.maybe >> exec_host.maybe >> exec_port.maybe >> hold_types.maybe >>
    join_path.maybe >> keep_files.maybe >> mail_points.maybe >> mail_users.maybe >> mtime.maybe >> output_path.maybe >>
    tab.maybe >> newline? >> priority.maybe >> qtime.maybe >> rerunable.maybe >>
    resource_list.maybe >> session_id.maybe >> substate.maybe >> shell_path_list.maybe >>
    variable_list.maybe >>
    euser.maybe >> egroup.maybe >> hashname.maybe >>
    queue_rank.maybe >> queue_type.maybe >>
    comment.maybe >> etime.maybe >> exit_status.maybe >>
    submit_args.maybe >> start_time .maybe >>
    walltime_remaining.maybe >> start_count.maybe >> fault_tolerant.maybe >> comp_time.maybe >> job_radix.maybe >> total_runtime.maybe >>
    submit_host.maybe >> nppcu.maybe >>
    newline?
  }

end #Parser
|
191
|
+
|
192
|
+
# Coerces the tagged leaves produced by Parser into Ruby values:
# :datetime -> DateTime, :string -> String, :integer -> Integer,
# :boolean -> true only for the literal "True".
class Trans < Parslet::Transform
  rule(:datetime => simple(:datetime)) {DateTime.parse(datetime)}
  rule(:string => simple(:string)) {String(string)}
  rule(:integer => simple(:integer)) {Integer(integer)}
  rule(:boolean => simple(:boolean)) {String(boolean) == "True"}
end #Trans
|
198
|
+
|
199
|
+
|
200
|
+
# Set up the parslet parser/transformer pair used by #query.
def initialize
  @parser = Parser.new
  @transformer = Trans.new
  @last_query = nil #cache last query, it can be useful to generate some kind of statistics ?
end #initialize
|
205
|
+
|
206
|
+
# Attribute names understood by the parser (class-level accessor).
def self.fields
  FIELDS
end

# Instance-level mirror of Qstat.fields.
def fields
  FIELDS
end
|
213
|
+
# Run `qstat -f` on the configured server and return the parsed jobs as
# Qstat::Job objects (also cached in @last_query).
#
# hash can contain keys:
#   type    = :raw  -> return the raw qstat output string, unparsed
#   job_id  = id    -> keep only the job with that id (with or without ".server")
#   job_ids = [...] -> keep only the jobs whose ids are in the array
def query(hash={})
  result = TORQUE.server.qstat("-f")
  results = nil
  if hash[:type] == :raw
    result.to_s
  else
    begin
      # qstat wraps long attribute values over "\n\t"; join them back before parsing
      results = @transformer.apply(@parser.parse(result.to_s.gsub(/\n\t/,'')))
    rescue Parslet::ParseFailed => failure
      puts failure.cause.ascii_tree
    end

    # a failed parse leaves results nil; an empty parse yields "" -- both mean "no jobs"
    # (previously a nil here crashed the select! calls below)
    results = [] if results.nil? || (results.is_a?(String) && results.empty?)
    if hash.key? :job_id
      results.select! {|j| (hash[:job_id].to_s == j[:job_id] || hash[:job_id].to_s == j[:job_id].to_s.split(".").first)}
    elsif hash.key? :job_ids
      if hash[:job_ids].is_a? Array
        results.select! {|j| (hash[:job_ids].include?(j[:job_id].to_s) || hash[:job_ids].include?(j[:job_id].to_s.split(".").first))}
      elsif hash[:job_ids].is_a? String
        warn "To be implemented for String object."
      else
        warn "To be implemented for #{hash[:job_ids].class}" # was `warm`: a NoMethodError at runtime
      end
    end
    @last_query = from_parselet_to_jobs(results)
  end
end #query
|
254
|
+
|
255
|
+
# Run #query with the same options and pretty-print the result as a table.
def display(hash={})
  query(hash)
  print_jobs_table(@last_query)
end
|
259
|
+
|
260
|
+
# Convert pre-parsed (parslet-shaped) data into Job objects without touching
# the server; useful for tests.
def mock(results)
  from_parselet_to_jobs(results)
end
|
263
|
+
|
264
|
+
|
265
|
+
private
|
266
|
+
|
267
|
+
# Turn each parslet attribute hash into a Qstat::Job struct by assigning
# every key/value pair through the struct's setters.
def from_parselet_to_jobs(results)
  results.map do |attributes|
    attributes.each_pair.with_object(Job.new) do |(key, value), job|
      job.send("#{key}=", value)
    end
  end
end
|
276
|
+
|
277
|
+
# Render an array of Job objects as a colored terminal-table, one row per
# job, colored by state (green=running, blue=queued, underline=completed,
# blink=exiting/other).  Relies on the colorize and terminal-table gems.
# NOTE(review): calls Kernel#exit when there are no jobs, which terminates
# the whole process -- acceptable for the CLI, surprising as a library call.
def print_jobs_table(jobs_info)
  rows = []
  head = ["Job ID","Job Name","Node(s)","Procs (per node)","Mem Used","Run Time","Queue","Status"]
  headings = head.map {|h| {:value => h, :alignment => :center}}
  if jobs_info.empty?
    print "\n\nNo Running jobs for user: ".light_red+"#{`whoami`}".green+"\n\n"
    exit
  else
    jobs_info.each do |job|
      line = [job.job_id.split(".").first,job.job_name,job.node,job.procs,"#{job.memory} mb","#{job.time}",job.queue,job.job_state]
      if job.completed?
        line[-1] = "Completed"; rows << line.map {|l| l.underline}
      elsif job.queued?
        line[-1] = "Queued"; rows << line.map {|l| l.light_blue}
      elsif job.running?
        line[-1] = "Running"; rows << line.map {|l| l.green}
      elsif job.exited?
        line[-1] = "Exiting"; rows << line.map {|l| l.green.blink}
      else
        rows << line.map {|l| l.red.blink}
      end
    end
    print "\nSummary of submitted jobs for user: ".light_blue+"#{jobs_info.first[:job_owner].split("@").first.green}\n\n"
    table = Terminal::Table.new :headings => headings, :rows => rows
    Range.new(0,table.number_of_columns-1).to_a.each {|c| table.align_column(c,:center) } # set all columns alignment to :center
    puts table
  end

end
|
306
|
+
|
307
|
+
end # Qstat
|
308
|
+
end # TORQUE
|
@@ -0,0 +1,342 @@
|
|
1
|
+
# require 'ostruct'
|
2
|
+
module TORQUE
|
3
|
+
|
4
|
+
|
5
|
+
# q=PBS::Qsub.new name: "Mapping", m: "abe", l: "nodes=1:ppn=4", d: '/mnt/bio/ngs/pbs', e: '/mnt/bio/ngs/pbs', shell: '/bin/bash', o: '/mnt/bio/ngs/pbs';
|
6
|
+
|
7
|
+
|
8
|
+
# [-a date_time] [-A account_string] [-b secs] [-c checkpoint_options]
|
9
|
+
# [-C directive_prefix] [-d path] [-D path] [-e path] [-f] [-h]
|
10
|
+
# [-I ] [-j join ] [-k keep ] [-l resource_list ]
|
11
|
+
# [-m mail_options] [-M user_list] [-N name] [-o path]
|
12
|
+
# [-p priority] [-P user[:group]] [-q destination] [-r c] [-S path_list]
|
13
|
+
# [-t array_request] [-u user_list]
|
14
|
+
# [-v variable_list] [-V ] [-W additional_attributes] [-X] [-z] [script]
|
15
|
+
|
16
|
+
|
17
|
+
class Qsub
|
18
|
+
# One accessor per qsub(1) single-letter switch; #to_s renders each
# non-nil value as a "#PBS -<flag> <value>" directive.
attr_accessor :a, :A,:b,:c,:C,:d,:D,:e,:f,:h,:I,:j,:k,:l
attr_accessor :m,:M,:N,:o,:p,:P,:q,:r,:S,:t,:u,:v,:V,:W,:X,:z, :script
# Resource shortcuts folded into the -l list (see #l and #nodes).
attr_accessor :walltime,:gres,:ppn, :procs
# Job id assigned by the scheduler after #submit.
attr_accessor :id
attr_writer :nodes

# def script(*args)
#   if args.size == 1
#     @script = args[0]
#   else
#     @script
#   end
# end

# Long, human-readable aliases for the single-letter switches above.
alias :cpus :ppn
alias :cpus= :ppn=
alias :shell :S
alias :shell= :S=
alias :name :N
alias :name= :N=
alias :queue :q
alias :queue= :q=
alias :account :A
alias :account= :A=
alias :when :a
alias :when= :a=
alias :checkpoint :c
alias :checkpoint= :c=
alias :wd :d
alias :wd= :d=
alias :working_directory :d
alias :working_directory= :d=
alias :root_directory :D
alias :root_directory= :D=
alias :email :M
alias :email= :M=
alias :stderr :e
alias :stderr= :e=
alias :stdout :o
alias :stdout= :o=
alias :run_as_user :P
alias :run_as_user= :P=
alias :rerunnable :r
alias :rerunnable= :r=
alias :user_list :u
alias :user_list= :u=
alias :variable_list :v
alias :variable_list= :v=
alias :exports :V
alias :exports= :V=
# NOTE(review): these alias :X, but #initialize stores opts[:additional_attributes]
# into @W -- a read through this alias returns @X instead; confirm which is intended.
alias :additional_attributes :X
alias :additional_attributes= :X=
|
72
|
+
# Build a qsub job description.  Options may be given either by their
# single-letter qsub switch (e.g. :N) or by their long alias (e.g. :name);
# the single-letter key wins when both are present.  An optional block with
# arity 1 receives the new instance for further configuration.
def initialize(opts={}, &block)
  @id = nil # configure when the job is submitted
  @a =opts[:a] || opts[:date_time]
  @A = opts[:A] || opts[:account]
  @b = opts[:b]
  @c = validate_checkpoint(opts[:c] || opts[:checkpoint])
  @C = opts[:C] || opts[:directive_prefix]
  @d = opts[:d] || opts[:working_directory] # PBS_O_INITDIR
  @D = opts[:D] || opts[:root_directory] # PBS_O_ROOTDIR
  @e = opts[:e] || opts[:stderr] # [hostname:]path_name
  @f = opts[:f] || opts[:fault_tolerant] # boolean
  @h = opts[:h] || opts[:user_hold] # boolean
  @I = opts[:I] || opts[:interactive]
  @j = opts[:j] || opts[:join_stdout_stderr]
  @k = validate_keep(opts) # check manual because I'm not going to implement this now.
  @l = opts[:l]
  @nodes = opts[:nodes]
  @walltime = opts[:walltime]
  @gres = opts[:gres]
  @ppn = opts[:ppn]
  @procs = opts[:procs]
  @m = validate_mail_options(opts)
  @M = opts[:M] || opts[:email]
  @N = opts[:N] || opts[:name]
  @o = opts[:o] || opts[:stdout] # [hostname:]path_name
  @p = validate_priority(opts) # between -1024, +1023
  # accept :run_as_user (matching the public alias) plus the historical
  # :root_as_user typo for backward compatibility
  @P = opts[:P] || opts[:run_as_user] || opts[:root_as_user]
  @q = opts[:q] || opts[:queue]
  @r = opts[:r] || opts[:rerunnable] # y|n
  @S = opts[:S] || opts[:shell]
  @t = opts[:t] || opts[:array_request]
  @u = opts[:u] || opts[:user_list]
  @v = opts[:v] || opts[:variable_list]
  @V = opts[:V] || opts[:exports] #this is just a boolean
  @W = opts[:W] || opts[:additional_attributes] # to DEVELOP, chaining jobs together.
  @X = opts[:X] || opts[:X_forwardning] # boolean
  @z = opts[:z] || opts[:no_jobid]
  @script = opts[:script]
  if block_given?
    if block.arity == 1
      yield self
    end
  end

end # initialize
|
117
|
+
|
118
|
+
# Configure the job through a block: a one-argument block receives self,
# a zero-argument block is instance_eval'd against self (and self is
# returned so calls can be chained).
def config(&block)
  return unless block_given?
  if block.arity == 1
    yield self
  else
    instance_eval(&block)
    self
  end
end # config
|
128
|
+
|
129
|
+
# Compose the -l resource list from the raw @l value plus the convenience
# attributes (nodes/ppn spec, walltime, gres); nil when nothing is set.
def l
  resources = [@l, nodes, @walltime, @gres]
  joined = resources.select { |entry| entry }.join(',')
  joined.empty? ? nil : joined
end
|
137
|
+
|
138
|
+
# Build the nodes/ppn/procs part of the -l resource list.
# - explicit @nodes is used as-is (without mutating it, unlike before);
# - ppn alone implies "nodes=1" and appends ":ppn=N";
# - procs appends "+procs=N" to an existing spec, or stands alone
#   (previously `nil << ...` raised when only procs was set);
# - returns nil when none of the three is configured.
# Also fixes the case where @nodes was set but neither ppn nor procs was:
# the old implementation fell off the trailing `if` and returned nil,
# silently dropping the nodes spec from #l.
def nodes
  spec = @nodes ? @nodes.to_s.dup : (ppn ? "nodes=1" : nil)

  if ppn
    spec << ":ppn=#{ppn}"
  elsif procs
    spec = spec ? spec << "+procs=#{procs}" : "procs=#{procs}"
  end

  spec
end
|
152
|
+
|
153
|
+
|
154
|
+
# Render the job as a PBS batch script: one "#PBS -<flag> <value>" line per
# configured option, followed by the script body (warns when no script code
# has been provided).
def to_s
  flags = [:a, :A,:b,:c,:C,:d,:D,:e,:f,:h,:I,:j,:k,:l,:m,:M,:N,:o,:p,:P,:q,:r,:S,:t,:u,:v,:V,:W,:X,:z]
  text = flags.each_with_object("") do |flag, script_text|
    setting = send(flag)
    script_text << "#PBS -#{flag} #{setting}\n" unless setting.nil?
  end
  if script.nil?
    warn("You are converting this qsub job into a script without a real code.")
  else
    text << "#{script}"
  end
  text
end
|
166
|
+
|
167
|
+
# Create a qsub job on the remote server and then submits it
# return the job_id from qsub and set it as a job variable.
# :dry => true will only transfer the file to the destination server and will not submit the job to the scheduler
# the job object will not have an id associated.
def submit(opts={dry: false})
  # upload the rendered PBS script (see #to_s) to its remote location
  TORQUE.server.file_upload StringIO.new(to_s), script_absolute_filename
  @id = TORQUE.server.qsub(script_absolute_filename).first unless opts[:dry] == true
end
|
175
|
+
|
176
|
+
# Fetch the current scheduler status for this job via qstat; warns and
# returns nil when the job has not been submitted yet.
def stat
  return warn("No job submitted") if id.nil?
  @qstat ||= TORQUE::Qstat.new
  @qstat.query(job_id: id)
end
|
185
|
+
|
186
|
+
|
187
|
+
# Remove this job from the queue via qdel; warns and returns nil when the
# job has not been submitted yet.
def rm
  return warn("No job submitted") if id.nil?
  TORQUE::Qdel.rm(id)
end
|
195
|
+
|
196
|
+
|
197
|
+
|
198
|
+
|
199
|
+
|
200
|
+
|
201
|
+
|
202
|
+
# def to_s
|
203
|
+
|
204
|
+
# <<-TOS
|
205
|
+
# #!/bin/bash
|
206
|
+
# #PBS -S /bin/bash
|
207
|
+
# #PBS -m abe
|
208
|
+
# #PBS -N #{task.name}
|
209
|
+
# #PBS -l nodes=1:ppn=#{task.cpus}
|
210
|
+
# #PBS -d #{task.wd}
|
211
|
+
# #PBS -e #{task.wd}
|
212
|
+
# #PBS -o #{task.wd}
|
213
|
+
# #{task.command}
|
214
|
+
# TOS
|
215
|
+
# end # to_s
|
216
|
+
|
217
|
+
private
|
218
|
+
|
219
|
+
# get the current work directory.
# if root_directory is defined it will get precedence on working_directory
# if root or working directories are not defined the user home directory is
# the default directory
def script_dir
  root_directory || working_directory || '~'
end

# File name of the generated PBS script, derived from the job name.
def script_filename
  "#{name}.pbs"
end

# Full (remote) path where the generated PBS script is written by #submit.
def script_absolute_filename
  File.join(script_dir,script_filename)
end
|
234
|
+
|
235
|
+
|
236
|
+
# Validate the -c (checkpoint) option.  Accepted values mention one of:
#   none, enabled, shutdown, periodic,
#   interval=minutes, depth=number, dir=path
# nil passes through unchanged; anything else raises.
def validate_checkpoint(value)
  return value if value.nil? || value =~ /none|enabled|shutdown|periodic|interval|depth|dir/
  raise "#{value} is not a valid option for checkpoint"
end
|
253
|
+
|
254
|
+
# Combine the -m mail options: an explicit :m wins; otherwise the
# send_on_abort / send_on_begin / send_on_end flags are concatenated.
# Returns nil when nothing is configured.
def validate_mail_options(opts)
  value = opts[:m] || [:send_on_abort, :send_on_begin, :send_on_end].map { |key| opts[key] }.select { |flag| flag }.join
  value.empty? ? nil : value
end
|
258
|
+
|
259
|
+
# Validate the -k (keep) option: valid values are n, e, o, eo or oe.
# Returns nil when unset; raises for anything else.
def validate_keep(opts)
  if (value = opts[:k] || opts[:keep])
    # anchored match: the old /eo|oe|e|o|n/ accepted any string that merely
    # contained one of those letters (e.g. "nonsense")
    if value.to_s =~ /\A(?:eo|oe|e|o|n)\z/
      value
    else
      raise "#{value} is not a valid option for keep"
    end
  end
end
|
268
|
+
|
269
|
+
# Validate the -p (priority) option; must be nil or within [-1024, 1023].
def validate_priority(opts)
  value = opts[:p] || opts[:priority]
  return value if value.nil? || (-1024..1023).cover?(value)
  raise "#{value} is out of range for priority, stay in between [-1024, +1023]"
end
|
277
|
+
# Check if the hash contains valid PBS options.
|
278
|
+
# If a key is not a valid pbs option a warning message is raise but the key is ket in the ostruct
|
279
|
+
# def validate_pbs_options(hash=nil)
|
280
|
+
# end
|
281
|
+
|
282
|
+
def fields
|
283
|
+
instance_variable_get("@table").keys
|
284
|
+
end
|
285
|
+
|
286
|
+
end # Job
|
287
|
+
end # PBS
|
288
|
+
|
289
|
+
|
290
|
+
# require 'bio-ngs'
|
291
|
+
# require 'ostruct'
|
292
|
+
|
293
|
+
|
294
|
+
# # This script can run only in a directory where data are trimmed.
|
295
|
+
# # Parameter: ensembl release number
|
296
|
+
# # the full path directory of a run
|
297
|
+
|
298
|
+
# release = ARGV[0]
|
299
|
+
# run = ARGV[1]
|
300
|
+
# path = ENV['ENSEMBL_STORAGE_PATH']
|
301
|
+
# ensembl_release = "Homo_sapiens.GRCh37.#{release}"
|
302
|
+
# index = "#{path}/release-#{release}/fasta/homo_sapiens/dna/#{ensembl_release}"
|
303
|
+
# transcriptome_index = "#{path}/release-#{release}/gtf/homo_sapiens/transcriptome_data/#{ensembl_release}/known"
|
304
|
+
# map_type = 'map_idx_e'
|
305
|
+
# cpus = 20
|
306
|
+
|
307
|
+
# tasks = []
|
308
|
+
|
309
|
+
# Bio::Ngs::Illumina.build(run).each do |project_name, project|
|
310
|
+
# project.each_sample do |sample_name, sample|
|
311
|
+
# # puts "#{run} #{project_name} #{sample_name}"
|
312
|
+
# tasks << task = OpenStruct.new(command:"", type:"", name:"", wd:"", cpus:"")
|
313
|
+
# task.type = map_type
|
314
|
+
# task.cpus = cpus
|
315
|
+
# task.wd = File.join(run,"Project_#{project_name}","Sample_#{sample_name}")
|
316
|
+
# task.name = "#{task.type}_#{sample_name}"
|
317
|
+
# task.command = <<-QSUB
|
318
|
+
# tophat -r 400 -p #{cpus} -o #{map_type} --transcriptome-index=#{transcriptome_index} #{index} #{sample.path}_R1.trimmed.fastq.gz #{sample.path}_R2.trimmed.fastq.gz
|
319
|
+
# samtools flagstat #{map_type}/accepted_hits.bam > #{map_type}/flagstat.txt
|
320
|
+
# QSUB
|
321
|
+
|
322
|
+
# end unless project_name=~/Undetermined/
|
323
|
+
# end
|
324
|
+
|
325
|
+
|
326
|
+
# #create PBS command
|
327
|
+
# tasks.each do |task|
|
328
|
+
# File.open(File.join(task.wd,"#{task.name}.pbs"), 'w') do |file|
|
329
|
+
# file.write <<-EOS
|
330
|
+
# #!/bin/bash
|
331
|
+
# #PBS -S /bin/bash
|
332
|
+
# #PBS -m abe
|
333
|
+
# #PBS -N #{task.name}
|
334
|
+
# #PBS -l nodes=1:ppn=#{task.cpus}
|
335
|
+
# #PBS -d #{task.wd}
|
336
|
+
# #PBS -e #{task.wd}
|
337
|
+
# #PBS -o #{task.wd}
|
338
|
+
# #{task.command}
|
339
|
+
# EOS
|
340
|
+
|
341
|
+
# end
|
342
|
+
# end
|