cwords 0.1.9

@@ -0,0 +1,430 @@
+ #!/usr/bin/env jruby
+
+ srcdir = File.dirname(__FILE__)
+ basedir = srcdir + "/../"
+ libdir = basedir + '/lib/'
+ $LOAD_PATH << libdir
+
+ require 'wordRS-lib.rb'
+ require 'rubygems'
+ require 'progressbar'
+ require 'optparse'
+ require 'peach'
+ require 'java'
+ require libdir + 'ushuffle.jar'
+ java_import 'UShuffle'
+
+ # default options
+ options = Hash.new
+ options[:wordsize] = [7]
+ options[:split_words] = nil
+ options[:dbdir] = basedir + "db/"
+ options[:scoring_scheme] = 'pval'
+ options[:permutations] = 50
+ options[:seqshuffles] = 100
+ options[:rankfile] = nil
+ options[:seqfile] = nil
+ options[:report_words] = nil
+ options[:plot_words] = nil
+ options[:onlyanno] = nil
+ options[:dump] = nil
+ options[:testing] = nil
+ options[:rank_all] = nil
+ options[:rank_inverse] = nil
+ options[:rank_split_median] = nil
+ options[:rank_abs] = nil
+ options[:bg] = 1 # mononucleotide shuffling
+ options[:threads] = 1
+
+ $coptions = OptionParser.new do |opts|
+   opts.banner = "Usage: cwords [options]"
+
+   # analysis settings
+   opts.on("-c", "--scoring_scheme ARG", "scoring scheme") {|o| options[:scoring_scheme] = o}
+   opts.on("-p", "--permutations ARG", "number of list permutations") {|o| options[:permutations] = o.to_i}
+   opts.on("-q", "--shuffles ARG", "number of sequence shuffles for sequence bias correction") {|o| options[:seqshuffles] = o.to_i}
+   opts.on("-w", "--wordsize ARG", "wordsize") { |o| options[:wordsize] = o.split(",").map{|x| x.to_i}}
+   opts.on("-b", "--bg ARG", "background nucleotide model") {|o| options[:bg] = o.to_i}
+   opts.on("-t", "--threads ARG", "use multiple threads to parallelize computations") {|o| options[:threads] = o.to_i}
+   opts.on( "--split_words WORDS", "split sequence set based on occurrences of WORDS") {|o| options[:split_words] = o.split(",")}
+   opts.on( "--onlyanno", "only process annotated (i.e. mirbase) words") {|o| options[:onlyanno] = true}
+
+   # rank control
+   opts.on("-x", "--rank_all", "do not split positive and neg. values") {|o| options[:rank_all] = true}
+   opts.on("-m", "--rank_split_median", "split ranked list at median") {|o| options[:rank_split_median] = true}
+   opts.on("-i", "--rank_inverse", "inverse all ranked lists") {|o| options[:rank_inverse] = true}
+   opts.on("-a", "--rank_abs", "rank by absolute value") {|o| options[:rank_abs] = true}
+
+   # files and directories
+   opts.on("-r", "--rankfile ARG", "rank file") {|o| options[:rankfile] = o}
+   opts.on("-s", "--seqfile ARG", "sequence file") {|o| options[:seqfile] = o}
+   opts.on("-d", "--db ARG", "word database") { |o| options[:db] = o}
+
+   # output control
+   opts.on("-u", "--dump ARG", "dump top words") { |o| options[:dump] = o.to_i}
+   opts.on( "--report_words ARG", "report on words (comma separated)") {|o| options[:report_words] = o.split(',')}
+   opts.on( "--plot_words ARG", "only make plot files for words (comma separated)") {|o| options[:plot_words] = o.split(',')}
+   opts.on( "--testing", "testing mode") {|o| options[:testing] = true}
+ end
+
+ def show_help(msg="", code=0, io=STDOUT)
+   io.puts "#{msg}\n#{$coptions}"
+   exit(code)
+ end
+
+ begin
+   $coptions.parse!(ARGV)
+ rescue OptionParser::ParseError => error
+   puts error.message
+   puts $coptions
+   exit
+ end
+
+ # mandatory parameters
+ [:rankfile].each{|p| show_help("option '#{p}' mandatory") if options[p].nil?}
+ show_help("db or seqfile required") if !(options[:db] or options[:seqfile])
+ show_help("scoring scheme must be one of: obs,bin,pval") if !(['obs','bin','pval'].include?(options[:scoring_scheme]))
+
+ testing = options[:testing]
+
+ # get filename without directory
+ rankfilename = File.basename(options[:rankfile])
+
+ # hard-coded
+ output_top = 10
+
+ prankdir = basedir + "/db/" + options[:db] + "/" if options[:db]
+ annofile = basedir + "/resources/" + "word_annotation.tsv" # word annotation
+ tidfile = basedir + "/resources/" + "genemap.tsv"
+ seqshuffles = 5000 # currently hardcoded for database
+ sequences = nil
+ nwords = options[:wordsize].map{|x| 4**x}.to_statarray.sum
+ bg = options[:bg] # background nucleotide model (see --bg)
+ threads = options[:threads]
+
+ ###
+ ### Main program
+ ###
+
+ puts ">> Parameters"
+ options.each{|k,v| puts sprintf("%-20s: %s",k,v) if !v.nil?}
+
+ # read in mirbase seed family
+ word_annotation = Hash.new("") # seq => family
+ IO.readlines(annofile).each{|l| word_annotation[l.split("\t")[0]] = l.split("\t")[1]}
+
+ # read optional sequences
+ if options[:seqfile]
+   puts "\n>> reading sequences ..."
+   sequences = Hash.new
+   IO.readlines(options[:seqfile],">")[1..-1].each do |entry|
+     ls = entry.split("\n").map{|x| x.chomp}
+     # hash ensures sequence ids unique
+     sequences[ls[0]] = ls[1..-2].join('').downcase.gsub('u','t') # last field is ">"
+   end
+   seqshuffles = options[:seqshuffles]
+ end
+
+ # initialize word id hash, word sequence => word id (0..nwords-1)
+ wids = Hash.new
+ i = 0
+ options[:wordsize].each{|ws| ['a','g','c','t'].rep_perm(ws) {|seqa| wids[seqa.join('')]=i ; i+=1 }}
+
+ ###
+ ### ID mapping
+ ###
+
+ # pre-computed word database:
+ #   map ids given in rankfile to internal ids
+ #   remove rankfile entries with no match to internal id
+ # sequence file:
+ #   take intersection of rank and sequence IDs
+
+ puts "\n>> Mapping and filtering IDs ..."
+
+ all = []
+ begin
+   idmap = Hash.new
+   internal_ids = nil
+
+   if sequences
+     internal_ids = sequences
+   else
+     IO.readlines(tidfile).each do |l|
+       tid = l.split(" ")[0]
+       l.split(" ")[1].split(",").each{|extid| idmap[extid] = tid}
+     end
+     internal_ids = idmap.invert # allowed internal ids
+   end
+
+   allh = Hash.new {|h,k| h[k] = []}
+   filtered = 0
+
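+   # each rankfile line is expected to be tab-separated: an id (internal or mappable external id)
+   # followed by a numeric value (e.g. a log fold change); values for duplicate ids are collected and mean-collapsed below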
+   IO.readlines(options[:rankfile]).each do |l|
+     l = l.split("\t")
+     # test if internal id or mappable external id
+     tid = (internal_ids.key?(l[0]) ? l[0] : idmap[l[0]])
+     tid.nil? ? filtered += 1 : allh[tid] << l[1].to_f
+   end
+
+   # filter unknown sequences
+   sequences.keys.each{|id| sequences.delete(id) if !allh.key?(id)} if sequences
+
+   # we currently mean-collapse ids, we could allow mean/min/max collapsing ...
+   all = allh.to_a.map{|tid,values| [tid,values.to_statarray.mean]}
+
+   puts "removed #{filtered} invalid transcript ids" if filtered > 0
+ end
+
+ allorder = Hash.new # tid => index in all
+ all.each_with_index{|x,i| allorder[x[0]] = i}
+
+ ###
+ ### Word enumeration (optional)
+ ###
+
+ wordscores = []
+ if sequences
+   puts "\n>> Enumerating words in sequences"
+   wordscores = Array.new(all.size) {Array.new(wids.size,0)} # {Java::short[wids.size].new}
+   pbar = ProgressBar.new("progress",sequences.size)
+   all.peach(threads) do |seqid,val|
+     us = UShuffle.new
+     seq = sequences[seqid]
+     seqidx = allorder[seqid]
+     pbar.inc
+     seqsize = seq.size
+     observed = Array.new(wids.size,0)
+     options[:wordsize].each{|ws| (0..seqsize-ws).each{|i| wid = wids[seq[i, ws]]; observed[wid] += 1 if not wid.nil?}}
+
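+     # per-word score for this sequence under the chosen scoring scheme:
+     #   bin  -> word present (1) or absent (-1)
+     #   obs  -> raw occurrence count
+     #   pval -> number of shuffled sequences with at least as many occurrences,
+     #           converted to an empirical p-value score further down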
+     case options[:scoring_scheme]
+     when "bin" then wordscores[seqidx] = observed.map{|x| x > 0 ? 1 : -1}
+     when "obs" then wordscores[seqidx] = observed
+     else
+       # pval, compute distribution of expected word occurrences
+       us.init_shuffle(seq,bg)
+       seqshuffles.times do |si|
+         seqsh = us.shuffle
+         expected = Array.new(wids.size,0)
+         options[:wordsize].each{|ws| (0..seqsize-ws).each{|i| wid = wids[seqsh[i, ws]]; expected[wid] += 1 if !wid.nil?}}
+         observed.each_with_index{|x,widx| wordscores[seqidx][widx] += 1 if expected[widx] >= x} # count shuffles with >= observed occurrences
+       end
+     end
+   end
+   pbar.finish
+ end
+
+ ###
+ ### Generate list ranking
+ ###
+
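+ # build the ranked sets to analyze as [sorted set, set name] pairs;
+ # by default the list is split into positive (up) and negative (down) values, each sorted from most extreme to least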
+ analyze = []
+ if options[:rank_split_median]
+   # we should perhaps use an :inverse option,
+   # reversing the two pos and neg lists
+   med = all.map{|x| x[1]}.to_statarray.median
+   pos_set = all.select{|x| x[1] > med}.sort{|a,b| b[1] <=> a[1]}
+   neg_set = all.select{|x| x[1] <= med}.sort{|a,b| a[1] <=> b[1]}
+   analyze = [[pos_set,'med_positive'],[neg_set,'med_negative']]
+ elsif options[:rank_all] # do not split positive and negative range
+   pos_set = all.sort{|a,b| b[1] <=> a[1]}
+   neg_set = all.sort{|a,b| a[1] <=> b[1]}
+   analyze = [[pos_set,'all_positive'],[neg_set,'all_negative']]
+ elsif options[:rank_abs] # rank by absolute values
+   pos_set = all.map{|x| [x[0],x[1].abs]}.sort{|a,b| b[1] <=> a[1]}
+   neg_set = pos_set.reverse
+   analyze = [[pos_set,'abs_positive'],[neg_set,'abs_negative']]
+ else
+   pos_set = all.select{|x| x[1] > 0}.sort{|a,b| b[1] <=> a[1]}
+   neg_set = all.select{|x| x[1] < 0}.sort{|a,b| a[1] <=> b[1]}
+   analyze = [[pos_set,'positive'],[neg_set,'negative']]
+ end
+
+ # inverse lists
+ analyze.map!{|set,nm| [set.reverse,nm+".inv"]} if options[:rank_inverse]
+
+ # split sequence set when --split_words option is given
+ if options[:split_words]
+   seqs_with_words = Hash.new
+
+   options[:split_words].each do |split_word|
+     begin
+       IO.readlines(prankdir + split_word.downcase + ".rnk").each do |x|
+         l = x.split("\t")
+         seqs_with_words[l[0]] = 1 if l[1].to_i > 0
+       end
+     rescue
+       warn "could not split sequences on word #{split_word}: #{$!}"
+     end
+   end
+
+   analyze_split = []
+   analyze.each do |set,nm|
+     analyze_split += set.partition{|x| seqs_with_words.key?(x[0])}.zip([nm+".split+"+options[:split_words].join(","),nm+".split-"+options[:split_words].join(",")])
+   end
+   analyze = analyze_split
+ end
+
+ ###
+ ### Correlation analysis
+ ###
+
+ puts "\n>> Analyzing sequence sets: " + analyze.map{|x| x[1]}.join(", ")
+
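+ # for each ranked set: score every word along the ranking, summarize it with a maximal
+ # running-sum statistic, compare against list permutations, and report z-scores, p-values and FDR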
+ analyze.each do |set,nm|
+   ngenes = set.size
+   puts "\n>> Analyzing #{nm} set ...\nnumber of genes: #{ngenes}"
+   next if ngenes == 0
+   perms = []
+   report = []
+   pfdrz = []
+
+   franks = Hash.new # tid => index in set
+   set.each_with_index{|x,i| franks[x[0]] = i}
+
+   puts "permuting #{options[:permutations]} times ...\n"
+   options[:permutations].times{|i| perms << (0..set.size-1).to_a.shuffle}
+
+   pbar = ProgressBar.new("progress",nwords)
+   wids.to_a.sort_by{|x| x[1]}.peach(threads) do |word,wid|
+     pbar.inc
+     next if options[:onlyanno] and not word_annotation.key?(word) # only process annotated words
+     next if options[:plot_words] and !options[:plot_words].include?(word)
+
+     plotfile = File.new(rankfilename + ".#{word}.#{nm}.csv","w") if options[:plot_words]
+
+     score = Array.new(ngenes) # scores ordered by fold change
+
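+     # assemble the per-gene scores for this word, in ranked-set order: either from the
+     # word counts enumerated above or from the precomputed per-word .rnk database files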
+     if sequences
+       score = set.map{|x| wordscores[allorder[x[0]]][wid]}
+       score.map!{|x| -Math.log((x+1.0)/(seqshuffles+1))} if options[:scoring_scheme] == 'pval'
+     else # use precomputed word database
+       wordcounts = IO.readlines(prankdir + word + ".rnk").map{|x| x.split("\t")}.select{|x| franks.key?(x[0])}
+       case options[:scoring_scheme]
+       when "bin" then wordcounts.each{|id,obs,gte_obs,exp| score[franks[id]] = obs.to_i == 0 ? -1 : 1}
+       when "obs" then wordcounts.each{|id,obs,gte_obs,exp| score[franks[id]] = obs.to_f}
+       when "pval" then wordcounts.each{|id,obs,gte_obs,exp| score[franks[id]] = -Math.log((gte_obs.to_f+1)/(seqshuffles+1.0))}
+       end
+     end
+
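+     # GSEA-like running sum: walk down the ranked list adding (score - mean score);
+     # maxrs is the running sum at its largest absolute deviation and leading_edge the position where it occurs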
+     smean = score.to_statarray.mean
+     maxrs = 0
+     leading_edge = 0
+     rs = 0 # running sum
+     rsa = [0]
+     score.each_with_index do |x,i|
+       rs += (x-smean)
+       rsa << rs
+       if rs.abs > maxrs.abs
+         maxrs = rs
+         leading_edge = i+1
+       end
+     end
+
+     plotfile.puts(([word+".score"] + [0] + score.map{|x| x.to_e(2)}).join(",")) if options[:plot_words]
+     plotfile.puts(([word+".rs"] + rsa).join(",")) if options[:plot_words]
+
+     # we are only interested in pos. maxrs scores,
+     # because we currently analyze up/down regulated separately
+     next if maxrs <= 0
+
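+     # null distribution: recompute the maximal running sum for each list permutation;
+     # absolute values are used since the permuted statistic is roughly symmetric around zero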
+     pmaxrs_pos = StatArray.new
+     perms.each_with_index do |psa,pidx|
+       prs = 0
+       prsa = [0]
+       pmaxrs = 0
+       psa.each do |i|
+         prs += score[i]-smean
+         prsa << prs
+         pmaxrs = prs if prs.abs > pmaxrs.abs
+       end
+       # the permuted scores are approx. symmetric around 0
+       pmaxrs_pos << pmaxrs.abs
+       plotfile.puts(([word+".rs."+pidx.to_s] + prsa).join(",")) if options[:plot_words]
+     end
+
+     pmean = pmaxrs_pos.mean
+     pstd = pmaxrs_pos.stddev
+
+     # Because the word z-score distributions can differ, we compute the deviation
+     # from the mean of the absolute (permuted) distribution.
+     # The permuted maxRS values should be approx. normally distributed (sum of random numbers).
+     pfdrz += pmaxrs_pos.map{|x| (x-pmean)/pstd}
+
+     # p-value and FDR statistic for the word are also computed from the absolute distribution
+     pval = (pmaxrs_pos.select{|x| x>=maxrs}.size+1.0)/(pmaxrs_pos.size+1)
+     zsc = (maxrs-pmean)/pstd
+
+     plotfile.close if options[:plot_words]
+     report << [wid,zsc,pval,nil,leading_edge]
+
+   end # word loop
+   pbar.finish
+
+   ###
+   ### FDR
+   ###
+
+   puts "fdr calculation ..."
+   fdrrank = pfdrz.map{|x| [x,nil]} # [zscore,word_report_index]
+   report.each_with_index{|x,idx| fdrrank << [x[1],idx]}
+   fdrrank = fdrrank.sort_by{|x| x[0]}.reverse # sort high zscore to low zscore
+   nfp = pfdrz.size.to_f
+   ntp = report.size.to_f
+   word_fdrrank = Hash.new()
+   ifp = 0
+   itp = 0
+   fdrrank.each do |zsc,idx|
+     if idx.nil?
+       ifp += 1
+     else
+       itp += 1
+       fpr = ifp/nfp
+       tpr = itp/ntp
+       report[idx][3] = fpr/tpr
+     end
+   end
+
+   cutoff_fdr = [0.001,0.005,0.01,0.05,0.1,0.15,0.2,0.25,0.5]
+   puts ""
+   puts (["fdr <="] + cutoff_fdr.map{|x| x.to_s(3)} + ["total"]).join("\t")
+   puts (["count"] + cutoff_fdr.map{|x| report.select{|y| y[3] <= x}.size} + [report.size]).join("\t")
+
+   ###
+   ### Output summarization
+   ###
+
+   wids2 = wids.invert
+   report = report.sort_by{|x| x[1]}.reverse
+   puts "\nTop #{output_top} words"
+   puts ['rank','word','z-score','p-value','fdr','ledge','annotation'].map{|x| sprintf("%-10s",x)}.join('')
+   report[0,output_top].each_with_index do |r,i|
+     wd = wids2[r[0]]
+     s = [i+1,wd,r[1].to_s(2),r[2].to_e(2),r[3].to_e(2),r[4].to_s,word_annotation[wd]]
+     puts s.map{|x| sprintf("%-10s",x)}.join('')
+   end
+
+   if options[:report_words]
+     puts "......"
+     report.each_with_index do |r,i|
+       wd = wids2[r[0]]
+       if options[:report_words].include?(wd) # and i > output_top
+         s = [i+1,wd,r[1].to_s(2),r[2].to_e(2),r[3].to_e(2),r[4].to_s,word_annotation[wd]]
+         puts s.map{|x| sprintf("%-10s",x)}.join('')
+       end
+     end
+   end
+
+   if options[:dump]
+     fname = rankfilename + ".#{nm}." + options[:dump].to_s
+     of = File.new(fname,"w")
+     of.puts ['rank','word','z-score','p-value','fdr','ledge','GS size','annotation'].map{|x| sprintf("%-10s",x)}.join('')
+     puts "dumping top #{options[:dump]} words in file: #{fname}"
+     report[0..options[:dump]-1].each_with_index do |r,i|
+       wd = wids2[r[0]]
+       s = [i+1,wd,r[1].to_s(2),r[2].to_e(2),r[3].to_e(2),r[4].to_s,word_annotation[wd]]
+       of.puts s.map{|x| sprintf("%-10s",x)}.join('')
+     end
+   end
+
+ end
@@ -0,0 +1,92 @@
+ #!/usr/bin/env jruby
+
+ srcdir = File.dirname(__FILE__)
+ basedir = srcdir + "/../"
+ libdir = basedir + 'lib/'
+ $LOAD_PATH << libdir
+
+ require 'wordRS-lib.rb'
+ require 'rubygems'
+ require 'progressbar'
+ require 'optparse'
+ require 'fileutils'
+
+ tdir = basedir + '/tmp/'
+ FileUtils.mkdir_p tdir # create dir if it does not exist
+
+ ###
+ ### Main
+ ###
+
+ # default options
+ options = Hash.new
+ options[:wordsize] = [7]
+ options[:seqfile] = nil
+ options[:partitions] = 1
+ options[:stats] = ['p'] # p=p
+ options[:ruby] = 'jruby --fast -J-Xmx1024m'
+ options[:shuffles] = 5000
+ options[:bg] = 1 # mononucleotide shuffling
+
+ $coptions = OptionParser.new do |opts|
+   opts.on("-w", "--wordsize ARG", "wordsize") { |o| options[:wordsize] = o.split(",").map{|x| x.to_i}}
+   opts.on("-s", "--seqfile ARG", "sequence file") {|o| options[:seqfile] = o}
+   opts.on("-p", "--partitions ARG", "number of sequence partitions") {|o| options[:partitions] = o.to_i}
+   opts.on("-a", "--stats ARG", "statistics to compute") {|o| options[:stats] = o.split('')}
+   opts.on("-u", "--shuffle ARG", "number of shuffles") {|o| options[:shuffles] = o.to_i}
+   opts.on("--ruby ARG", "ruby interpreter") {|o| options[:ruby] = o}
+   opts.on("-b", "--bg ARG", "background nucleotide model") {|o| options[:bg] = o.to_i}
+ end
+
+ def show_help(msg="", code=0, io=STDOUT)
+   io.puts "#{msg}\n#{$coptions}"
+   exit(code)
+ end
+
+ begin
+   $coptions.parse!(ARGV)
+ rescue OptionParser::ParseError => error
+   puts error.message
+   puts $coptions
+   exit
+ end
+
+ # mandatory parameters
+ [:seqfile].each{ |p| show_help("option '#{p}' mandatory") if options[p].nil?}
+
+ show_help("seqfile must have fasta-format (.fa)") if !options[:seqfile].match(/\.fa$/)
+ dbname = File.basename(options[:seqfile],'.fa')
+ dbdir = basedir + "/db/" + dbname + "_bg#{options[:bg]}"
+ FileUtils.mkdir_p dbdir # create dir if it does not exist
+
+ n = options[:partitions]
+
+ # read sequences
+ @seqs = IO.readlines(options[:seqfile],"\n>")
+ puts "#{@seqs.size} sequences"
+
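+ # remove any existing per-word .rnk files for this database so the new run starts from scratch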
+ puts "purging database ..."
+ options[:wordsize].each do |wordsize|
+   ['a','g','c','t'].rep_perm(wordsize) {|seqa| wf = "#{dbdir}/#{seqa.join('')}.rnk"; File.delete(wf) if File.exist?(wf)}
+ end
+
+ puts "starting #{n} processes ..."
+
+ cmd = "#{options[:ruby]} #{basedir}/scripts/wordsrus_mkdb.rb"
+ cmd += " -w #{options[:wordsize].join(',')} -s #{options[:seqfile]} -a #{options[:stats].join(",")} -u #{options[:shuffles]} --bg #{options[:bg]}"
+
+ stamp = Time.now.to_i
+
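+ # split the sequences into n roughly equal partitions and fork one child process per partition;
+ # each child builds the word database for its sequence range and logs to a .dbout file under tmp/,
+ # e.g. (illustrative): <cmd> -p 1-1000 &> tmp/<dbname>_b1_1_<stamp>.dbout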
+ partsize = @seqs.size/n
+ cmds = []
+ (n-1).times do |i|
+   cmds << cmd + " -p #{(i)*(partsize)+1}-#{(i+1)*(partsize)} &> #{tdir}#{dbname}_b#{options[:bg]}_#{i+1}_#{stamp}.dbout"
+ end
+ cmds << cmd + " -p #{partsize*(n-1)+1}-#{[n*(partsize),@seqs.size].max} &> #{tdir}#{dbname}_b#{options[:bg]}_#{n}_#{stamp}.dbout"
+ cmds.each do |c|
+   p c
+   exec c if fork.nil?
+ end
+
+ puts "Jobs started."
+ puts "Monitor with : tail #{tdir}#{dbname}_*#{stamp}.dbout"