workflow_manager 0.2.5 → 0.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/.rspec ADDED
@@ -0,0 +1,2 @@
1
+ --color
2
+ --require spec_helper
data/bin/wfm_monitoring CHANGED
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env ruby
2
2
  # encoding: utf-8
3
3
  # 20121112 masa workflow manager client
4
- Version = '20131104-192323'
4
+ Version = '20160317-153614'
5
5
 
6
6
  require 'drb/drb'
7
7
  require 'workflow_manager/optparse_ex'
@@ -52,4 +52,5 @@ sge_options << "-n #{opt.nodes}" if opt.nodes
52
52
 
53
53
  script_content = File.read(script_file)
54
54
  workflow_manager = DRbObject.new_with_uri(uri)
55
- puts workflow_manager.start_monitoring(script_file, user, 0, script_content, project_number, sge_options.join(' '), opt.log)
55
+ #puts workflow_manager.start_monitoring(script_file, user, 0, script_content, project_number, sge_options.join(' '), opt.log)
56
+ puts workflow_manager.start_monitoring2(script_file, script_content, user, project_number, sge_options.join(' '), opt.log)
@@ -26,6 +26,8 @@ module WorkflowManager
26
26
  end
27
27
  def job_ends?(log_file)
28
28
  end
29
+ def job_pending?(job_id)
30
+ end
29
31
  def copy_commands(org_dir, dest_parent_dir, now=nil)
30
32
  end
31
33
  def kill_command(job_id)
@@ -105,7 +107,8 @@ module WorkflowManager
105
107
  qstat_flag = false
106
108
  IO.popen('qstat -u "*"') do |io|
107
109
  while line=io.gets
108
- if line =~ /#{job_id}/
110
+ jobid, prior, name, user, state, *others = line.chomp.split
111
+ if jobid.strip == job_id and state == 'r'
109
112
  qstat_flag = true
110
113
  break
111
114
  end
@@ -125,6 +128,19 @@ module WorkflowManager
125
128
  end
126
129
  log_flag
127
130
  end
131
+ def job_pending?(job_id)
132
+ qstat_flag = false
133
+ IO.popen('qstat -u "*"') do |io|
134
+ while line=io.gets
135
+ jobid, prior, name, user, state, *others = line.chomp.split
136
+ if jobid.strip == job_id and state == 'qw'
137
+ qstat_flag = true
138
+ break
139
+ end
140
+ end
141
+ end
142
+ qstat_flag
143
+ end
128
144
  def copy_commands(org_dir, dest_parent_dir, now=nil)
129
145
  commands = if now
130
146
  ["g-req copynow #{org_dir} #{dest_parent_dir}"]
@@ -3,6 +3,7 @@
3
3
 
4
4
  require 'drb/drb'
5
5
  require 'fileutils'
6
+ require 'csv'
6
7
  begin
7
8
  require 'kyotocabinet'
8
9
  NO_KYOTO = false
@@ -105,7 +106,7 @@ module WorkflowManager
105
106
  log_puts("Server starts")
106
107
  end
107
108
  def hello
108
- 'hello, '+ @cluster.name
109
+ 'hello test hoge, '+ @cluster.name
109
110
  end
110
111
  def copy_commands(org_dir, dest_parent_dir, now=nil)
111
112
  @cluster.copy_commands(org_dir, dest_parent_dir, now)
@@ -131,6 +132,111 @@ module WorkflowManager
131
132
  end
132
133
  end
133
134
  end
135
+ def input_dataset_tsv_path(script_content)
136
+ gstore_dir = nil
137
+ path = nil
138
+ script_content.split(/\n/).each do |line|
139
+ if line =~ /GSTORE_DIR=(.+)/
140
+ gstore_dir = $1.chomp
141
+ elsif line =~ /INPUT_DATASET=(.+)/
142
+ path = $1.chomp
143
+ break
144
+ end
145
+ end
146
+ [gstore_dir, path]
147
+ end
148
+ def input_dataset_file_list(dataset_tsv_path)
149
+ file_list = []
150
+ CSV.foreach(dataset_tsv_path, :headers=>true, :col_sep=>"\t") do |row|
151
+ row.each do |header, value|
152
+ if header =~ /\[File\]/
153
+ file_list << value
154
+ end
155
+ end
156
+ end
157
+ file_list
158
+ end
159
+ def input_dataset_exist?(file_list)
160
+ flag = true
161
+ file_list.each do |file|
162
+ unless File.exist?(file)
163
+ flag = false
164
+ break
165
+ end
166
+ end
167
+ flag
168
+ end
169
+ def update_time_status(job_id, current_status, script_name, user, project_number)
170
+ # if the current status changed from last time, then save, otherwise do nothing
171
+ # once status changes into success or fail, then the thread is expected to be killed in later process
172
+ @statuses.transaction do |statuses|
173
+ start_time = nil
174
+ if stat = statuses[job_id]
175
+ last_status, script_name, start_time, user, project_number = stat.split(/,/)
176
+ end
177
+ time = if start_time
178
+ if current_status == 'success' or current_status == 'fail'
179
+ start_time + '/' + Time.now.strftime("%Y-%m-%d %H:%M:%S")
180
+ elsif current_status != last_status
181
+ Time.now.strftime("%Y-%m-%d %H:%M:%S")
182
+ end
183
+ else
184
+ Time.now.strftime("%Y-%m-%d %H:%M:%S")
185
+ end
186
+ if time
187
+ statuses[job_id] = [current_status, script_name, time, user, project_number].join(',')
188
+ end
189
+ end
190
+ end
191
+ def finalize_monitoring(current_status, log_file, log_dir)
192
+ if current_status == 'success' or current_status == 'fail'
193
+ unless log_dir.empty?
194
+ copy_commands(log_file, log_dir).each do |command|
195
+ log_puts(command)
196
+ system command
197
+ end
198
+ err_file = log_file.gsub('_o.log','_e.log')
199
+ copy_commands(err_file, log_dir).each do |command|
200
+ log_puts(command)
201
+ system command
202
+ end
203
+ end
204
+ Thread.current.kill
205
+ end
206
+ end
207
+ def start_monitoring2(script_path, script_content, user='sushi_lover', project_number=0, sge_options='', log_dir='')
208
+ # script_path is only used to generate a log file name
209
+ # It is not used to read the script contents
210
+ gstore_dir, input_dataset = input_dataset_tsv_path(script_content)
211
+ if gstore_dir and input_dataset
212
+ path = File.join(gstore_dir, input_dataset)
213
+ file_list = input_dataset_file_list(path)
214
+ if input_dataset_exist?(file_list)
215
+ # wait until the files come
216
+ end
217
+ end
218
+
219
+ job_id, log_file, command = @cluster.submit_job(script_path, script_content, sge_options)
220
+
221
+ if job_id and log_file
222
+ worker = Thread.new(job_id, log_file, log_dir, script_path) do |job_id, log_file, log_dir, script_path|
223
+ loop do
224
+ # check status
225
+ current_status = check_status(job_id, log_file)
226
+
227
+ # save time and status
228
+ update_time_status(job_id, current_status, script_path, user, project_number)
229
+
230
+ # finalize (kill current thread) in case of success or fail
231
+ finalize_monitoring(current_status, log_file, log_dir)
232
+
233
+ # wait
234
+ sleep @interval
235
+ end # loop
236
+ end # thread
237
+ job_id
238
+ end
239
+ end
134
240
  def start_monitoring(submit_command, user = 'sushi lover', resubmit = 0, script = '', project_number = 0, sge_options='', log_dir = '')
135
241
  log_puts("monitoring: script=" + submit_command + " user=" + user + " resubmit=" + resubmit.to_s + " project=" + project_number.to_s + " sge option=" + sge_options + " log dir=" + log_dir.to_s)
136
242
 
@@ -227,7 +333,7 @@ module WorkflowManager
227
333
  #@statuses.open(@db_stat)
228
334
  @statuses.transaction do |statuses|
229
335
  if new_status and stat = statuses[job_id.to_s]
230
- status_list = ['success', 'running', 'fail']
336
+ status_list = ['success', 'running', 'pending', 'fail']
231
337
  if status_list.include?(new_status)
232
338
  items = stat.split(/,/)
233
339
  items.shift
@@ -294,17 +400,17 @@ module WorkflowManager
294
400
  script
295
401
  end
296
402
  def success_or_fail(job_id, log_file)
297
- job_running = @cluster.job_running?(job_id)
298
- job_ends = @cluster.job_ends?(log_file)
299
- msg = if job_running
403
+ msg = if @cluster.job_running?(job_id)
300
404
  'running'
301
- elsif job_ends
405
+ elsif @cluster.job_ends?(log_file)
302
406
  'success'
407
+ elsif @cluster.job_pending?(job_id)
408
+ 'pending'
303
409
  else
304
410
  'fail'
305
411
  end
306
- msg
307
412
  end
413
+ alias_method :check_status, :success_or_fail
308
414
  end
309
415
  end
310
416
 
@@ -1,3 +1,3 @@
1
1
  module WorkflowManager
2
- VERSION = "0.2.5"
2
+ VERSION = "0.2.6"
3
3
  end
@@ -0,0 +1,97 @@
1
+ #!/usr/bin/env ruby
2
+ # encoding: utf-8
3
+
4
+ require './lib/workflow_manager/cluster'
5
+
6
+ include WorkflowManager
7
+ describe Cluster do
8
+ subject(:cluster) {Cluster.new}
9
+ context 'when new' do
10
+ it {is_expected.to be_an_instance_of Cluster} # RSpec3
11
+ # it {should be_an_instance_of Cluster} # RSpec2
12
+ # example {expect(cluster).to be_an_instance_of Cluster} # RSpec3
13
+ # its(:options) {should be_empty} # RSpec2, does not work anymore
14
+ # example {expect(cluster.options).to be_empty}
15
+ end
16
+ describe '#job_running?' do
17
+ subject {cluster.job_running?('job_id')}
18
+ it {is_expected.to be_nil}
19
+ end
20
+ describe '#job_ends?' do
21
+ subject {cluster.job_ends?('log_file')}
22
+ it {is_expected.to be_nil}
23
+ end
24
+ describe '#job_pending?' do
25
+ subject {cluster.job_pending?('job_id')}
26
+ it {is_expected.to be_nil}
27
+ end
28
+ end
29
+
30
+ describe FGCZCluster do
31
+ subject(:cluster) {FGCZCluster.new}
32
+ context 'when new' do
33
+ it {is_expected.to be_an_instance_of FGCZCluster}
34
+ end
35
+ describe '#job_running?' do
36
+ let(:line) {' 72757 0.50661 Gcal017211 pacbio r 03/11/2016'}
37
+ let(:job_id) {'72757'}
38
+ subject {cluster.job_running?(job_id)}
39
+ let(:io) {double('io')}
40
+ before do
41
+ allow(IO).to receive(:popen).and_yield(io)
42
+ end
43
+ context 'when running' do
44
+ before do
45
+ allow(io).to receive(:gets).and_return(line)
46
+ end
47
+ it {is_expected.to eq true}
48
+ end
49
+ context 'when not running?' do
50
+ before do
51
+ allow(io).to receive(:gets).and_return(nil)
52
+ end
53
+ it {is_expected.to eq false}
54
+ end
55
+ end
56
+ describe '#job_ends?' do
57
+ let(:log_file) {'log_file'}
58
+ subject {cluster.job_ends?(log_file)}
59
+ let(:io) {double('io')}
60
+ before do
61
+ allow(IO).to receive(:popen).and_yield(io)
62
+ end
63
+ context 'when job ends' do
64
+ before do
65
+ allow(io).to receive(:gets).and_return('__SCRIPT END__')
66
+ end
67
+ it {is_expected.to eq true}
68
+ end
69
+ context 'when job not ends' do
70
+ before do
71
+ allow(io).to receive(:gets).and_return(nil)
72
+ end
73
+ it {is_expected.to eq false}
74
+ end
75
+ end
76
+ describe '#job_pending?' do
77
+ let(:job_id) {'1234'}
78
+ let(:line) {' 1234 0.50661 Gcal017211 pacbio qw 03/11/2016'}
79
+ subject {cluster.job_pending?(job_id)}
80
+ let(:io) {double('io')}
81
+ before do
82
+ allow(IO).to receive(:popen).and_yield(io)
83
+ end
84
+ context 'when pending' do
85
+ before do
86
+ allow(io).to receive(:gets).and_return(line)
87
+ end
88
+ it {is_expected.to eq true}
89
+ end
90
+ context 'when not pending' do
91
+ before do
92
+ allow(io).to receive(:gets).and_return(nil)
93
+ end
94
+ it {is_expected.to eq false}
95
+ end
96
+ end
97
+ end
@@ -0,0 +1,210 @@
1
+ #!/usr/bin/env ruby
2
+ # encoding: utf-8
3
+
4
+ require './lib/workflow_manager/server'
5
+
6
+ include WorkflowManager
7
+ describe Server do
8
+ subject(:server) {Server.new}
9
+ let(:cluster){double('local_computer')}
10
+ before do
11
+ WorkflowManager::Server.configure do |config|
12
+ config.log_dir = '/srv/GT/analysis/masaomi/workflow_manager/run_workflow_manager/logs'
13
+ config.db_dir = '/srv/GT/analysis/masaomi/workflow_manager/run_workflow_manager/dbs'
14
+ config.interval = 30
15
+ config.resubmit = 0
16
+ #config.cluster = WorkflowManager::LocalComputer.new('local_computer')
17
+ config.cluster = cluster
18
+ allow(config.cluster).to receive_messages(:log_dir= => nil, :name => 'local_computer')
19
+ end
20
+ # suppress puts
21
+ allow($stdout).to receive(:write)
22
+ end
23
+ context 'when new' do
24
+ it {is_expected.to be_an_instance_of Server} # RSpec3
25
+ end
26
+ describe '#input_dataset_exist?' do
27
+ let(:file_list) {['file1', 'file2']}
28
+ subject{server.input_dataset_exist?(file_list)}
29
+ context 'when file exist' do
30
+ before do
31
+ allow(File).to receive(:exist?).and_return(true)
32
+ end
33
+ it {is_expected.to eq true}
34
+ end
35
+ context 'when file not exist' do
36
+ before do
37
+ allow(File).to receive(:exist?).and_return(false)
38
+ end
39
+ it {is_expected.to eq false}
40
+ end
41
+ end
42
+ describe '#input_dataset_file_list' do
43
+ subject{server.input_dataset_file_list('input_dataset_tsv_path')}
44
+ let(:rows) {{'Read1 [File]'=>'file1', 'Read2 [File]'=>'file2'} }
45
+ before do
46
+ allow(CSV).to receive(:foreach).and_yield(rows)
47
+ end
48
+ let(:sample_file_list) { ['file1', 'file2'] }
49
+ it {is_expected.to eq sample_file_list}
50
+ end
51
+ describe '#input_dataset_tsv_path' do
52
+ let(:sample_script) {
53
+ "SCRATCH_DIR=/scratch/test_masa_2016-03-03--16-36-42_temp$$
54
+ GSTORE_DIR=/srv/gstore/projects
55
+ INPUT_DATASET=/srv/gstore/projects/p1535/test_masa/input_dataset.tsv"
56
+ }
57
+ let(:path){
58
+ [
59
+ '/srv/gstore/projects',
60
+ '/srv/gstore/projects/p1535/test_masa/input_dataset.tsv'
61
+ ]
62
+ }
63
+ subject{server.input_dataset_tsv_path(sample_script)}
64
+ it {is_expected.to eq path}
65
+ end
66
+ describe '#start_monitoring2' do
67
+ let(:script_path){'script_file'}
68
+ let(:script_content){'script_content'}
69
+ let(:cluster){double('cluster')}
70
+ before do
71
+ allow(server).to receive(:input_dataset_tsv_path)
72
+ allow(server).to receive(:input_dataset_file_list)
73
+ allow(server).to receive(:input_dataset_exist?)
74
+ end
75
+ context 'when submit_job failed' do
76
+ before do
77
+ allow(cluster).to receive(:submit_job)
78
+ end
79
+ subject {server.start_monitoring2(script_path, script_content)}
80
+ it {is_expected.to be_nil}
81
+ end
82
+ context 'when submit_job succeeded' do
83
+ before do
84
+ allow(cluster).to receive(:submit_job).and_return(['job_id', 'log_file', 'command'])
85
+ allow(Thread).to receive(:new)
86
+ end
87
+ subject {server.start_monitoring2(script_path, script_content)}
88
+ it {is_expected.to eq 'job_id'}
89
+ end
90
+ end
91
+ describe '#success_or_fail' do
92
+ let(:job_id){'job_id'}
93
+ let(:log_file){'log_file'}
94
+ let(:cluster){double('cluster')}
95
+ before do
96
+ server.instance_variable_set(:@cluster, cluster)
97
+ end
98
+ context 'when job running' do
99
+ before do
100
+ allow(cluster).to receive(:job_running?).and_return(true)
101
+ allow(cluster).to receive(:job_ends?).and_return(nil)
102
+ allow(cluster).to receive(:job_pending?).and_return(nil)
103
+ end
104
+ subject {server.success_or_fail(job_id, log_file)}
105
+ it {is_expected.to eq 'running'}
106
+ end
107
+ context 'when job ends' do
108
+ before do
109
+ allow(cluster).to receive(:job_running?).and_return(nil)
110
+ allow(cluster).to receive(:job_ends?).and_return(true)
111
+ allow(cluster).to receive(:job_pending?).and_return(nil)
112
+ end
113
+ subject {server.success_or_fail(job_id, log_file)}
114
+ it {is_expected.to eq 'success'}
115
+ end
116
+ context 'when job pending' do
117
+ before do
118
+ allow(cluster).to receive(:job_running?).and_return(nil)
119
+ allow(cluster).to receive(:job_ends?).and_return(nil)
120
+ allow(cluster).to receive(:job_pending?).and_return(true)
121
+ end
122
+ subject {server.success_or_fail(job_id, log_file)}
123
+ it {is_expected.to eq 'pending'}
124
+ end
125
+ end
126
+ describe '#status' do
127
+ let(:statuses) {double('statuses')}
128
+ before do
129
+ server.instance_variable_set(:@statuses, statuses)
130
+ end
131
+ context 'when read status' do
132
+ before do
133
+ allow(statuses).to receive(:transaction).and_yield({'job_id'=>'stat'})
134
+ end
135
+ subject {server.status('job_id')}
136
+ it {is_expected.to eq 'stat'}
137
+ end
138
+ context 'when assign status' do
139
+ before do
140
+ allow(statuses).to receive(:transaction).and_yield({'job_id'=>'running'})
141
+ end
142
+ subject {server.status('job_id', 'success')}
143
+ it {is_expected.to eq 'success'}
144
+ end
145
+ end
146
+ describe '#update_time_status' do
147
+ let(:statuses) {double('statuses')}
148
+ before do
149
+ server.instance_variable_set(:@statuses, statuses)
150
+ end
151
+ context 'when initial call' do
152
+ before do
153
+ allow(statuses).to receive(:transaction).and_yield({'job_id' => nil})
154
+ allow(Time).to receive_message_chain(:now, :strftime).and_return('time')
155
+ end
156
+ let(:expected) {'current_status,script_name,time,user,project_number'}
157
+ subject {server.update_time_status('job_id', 'current_status', 'script_name', 'user', 'project_number')}
158
+ it {is_expected.to eq expected}
159
+ end
160
+ context 'when changing status from running to success' do
161
+ let(:last_status) {'running,script_name,start_time,user,project_number'}
162
+ before do
163
+ allow(statuses).to receive(:transaction).and_yield({'job_id' => last_status})
164
+ allow(Time).to receive_message_chain(:now, :strftime).and_return('end_time')
165
+ end
166
+ subject {server.update_time_status('job_id', 'success', 'script_name', 'user', 'project_number')}
167
+ let(:expected) {'success,script_name,start_time/end_time,user,project_number'}
168
+ it {is_expected.to eq expected}
169
+ end
170
+ context 'when current_status and new_status are same' do
171
+ let(:last_status) {'running,script_name,start_time,user,project_number'}
172
+ before do
173
+ allow(statuses).to receive(:transaction).and_yield({'job_id' => last_status})
174
+ allow(Time).to receive_message_chain(:now, :strftime).and_return('end_time')
175
+ end
176
+ subject {server.update_time_status('job_id', 'running', 'script_name', 'user', 'project_number')}
177
+ it {is_expected.to be_nil}
178
+ end
179
+ context 'when current_status==running and last_status==pending' do
180
+ let(:last_status) {'pending,script_name,start_time,user,project_number'}
181
+ before do
182
+ allow(statuses).to receive(:transaction).and_yield({'job_id' => last_status})
183
+ allow(Time).to receive_message_chain(:now, :strftime).and_return('end_time')
184
+ end
185
+ subject {server.update_time_status('job_id', 'running', 'script_name', 'user', 'project_number')}
186
+ let(:expected) {"running,script_name,end_time,user,project_number"}
187
+ it {is_expected.to eq expected}
188
+ end
189
+ end
190
+ describe '#finalize_monitoring' do
191
+ context 'when status == running' do
192
+ before do
193
+ allow(server).to receive(:log_puts)
194
+ allow(Thread).to receive_message_chain(:current, :kill)
195
+ end
196
+ subject {server.finalize_monitoring('running', 'log_file', 'log_dir')}
197
+ it {is_expected.to be_nil}
198
+ end
199
+ context 'when status == success' do
200
+ before do
201
+ allow(server).to receive(:log_puts)
202
+ allow(Thread).to receive_message_chain(:current, :kill)
203
+ #expect(server).to receive(:system).twice
204
+ expect(server).to receive(:copy_commands).twice.and_return([])
205
+ end
206
+ subject {server.finalize_monitoring('success', 'log_file', 'log_dir')}
207
+ it {is_expected.to be_nil}
208
+ end
209
+ end
210
+ end
@@ -0,0 +1,96 @@
1
+ # This file was generated by the `rspec --init` command. Conventionally, all
2
+ # specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`.
3
+ # The generated `.rspec` file contains `--require spec_helper` which will cause
4
+ # this file to always be loaded, without a need to explicitly require it in any
5
+ # files.
6
+ #
7
+ # Given that it is always loaded, you are encouraged to keep this file as
8
+ # light-weight as possible. Requiring heavyweight dependencies from this file
9
+ # will add to the boot time of your test suite on EVERY test run, even for an
10
+ # individual file that may not need all of that loaded. Instead, consider making
11
+ # a separate helper file that requires the additional dependencies and performs
12
+ # the additional setup, and require it from the spec files that actually need
13
+ # it.
14
+ #
15
+ # The `.rspec` file also contains a few flags that are not defaults but that
16
+ # users commonly want.
17
+ #
18
+ # See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration
19
+ RSpec.configure do |config|
20
+ # rspec-expectations config goes here. You can use an alternate
21
+ # assertion/expectation library such as wrong or the stdlib/minitest
22
+ # assertions if you prefer.
23
+ config.expect_with :rspec do |expectations|
24
+ # This option will default to `true` in RSpec 4. It makes the `description`
25
+ # and `failure_message` of custom matchers include text for helper methods
26
+ # defined using `chain`, e.g.:
27
+ # be_bigger_than(2).and_smaller_than(4).description
28
+ # # => "be bigger than 2 and smaller than 4"
29
+ # ...rather than:
30
+ # # => "be bigger than 2"
31
+ expectations.include_chain_clauses_in_custom_matcher_descriptions = true
32
+ end
33
+
34
+ # rspec-mocks config goes here. You can use an alternate test double
35
+ # library (such as bogus or mocha) by changing the `mock_with` option here.
36
+ config.mock_with :rspec do |mocks|
37
+ # Prevents you from mocking or stubbing a method that does not exist on
38
+ # a real object. This is generally recommended, and will default to
39
+ # `true` in RSpec 4.
40
+ mocks.verify_partial_doubles = true
41
+ end
42
+
43
+ # The settings below are suggested to provide a good initial experience
44
+ # with RSpec, but feel free to customize to your heart's content.
45
+ =begin
46
+ # These two settings work together to allow you to limit a spec run
47
+ # to individual examples or groups you care about by tagging them with
48
+ # `:focus` metadata. When nothing is tagged with `:focus`, all examples
49
+ # get run.
50
+ config.filter_run :focus
51
+ config.run_all_when_everything_filtered = true
52
+
53
+ # Allows RSpec to persist some state between runs in order to support
54
+ # the `--only-failures` and `--next-failure` CLI options. We recommend
55
+ # you configure your source control system to ignore this file.
56
+ config.example_status_persistence_file_path = "spec/examples.txt"
57
+
58
+ # Limits the available syntax to the non-monkey patched syntax that is
59
+ # recommended. For more details, see:
60
+ # - http://rspec.info/blog/2012/06/rspecs-new-expectation-syntax/
61
+ # - http://www.teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/
62
+ # - http://rspec.info/blog/2014/05/notable-changes-in-rspec-3/#zero-monkey-patching-mode
63
+ config.disable_monkey_patching!
64
+
65
+ # This setting enables warnings. It's recommended, but in some cases may
66
+ # be too noisy due to issues in dependencies.
67
+ config.warnings = true
68
+
69
+ # Many RSpec users commonly either run the entire suite or an individual
70
+ # file, and it's useful to allow more verbose output when running an
71
+ # individual spec file.
72
+ if config.files_to_run.one?
73
+ # Use the documentation formatter for detailed output,
74
+ # unless a formatter has already been configured
75
+ # (e.g. via a command-line flag).
76
+ config.default_formatter = 'doc'
77
+ end
78
+
79
+ # Print the 10 slowest examples and example groups at the
80
+ # end of the spec run, to help surface which specs are running
81
+ # particularly slow.
82
+ config.profile_examples = 10
83
+
84
+ # Run specs in random order to surface order dependencies. If you find an
85
+ # order dependency and want to debug it, you can fix the order by providing
86
+ # the seed, which is printed after each run.
87
+ # --seed 1234
88
+ config.order = :random
89
+
90
+ # Seed global randomization in this process using the `--seed` CLI option.
91
+ # Setting this allows you to use `--seed` to deterministically reproduce
92
+ # test failures related to randomization by passing the same `--seed` value
93
+ # as the one that triggered the failure.
94
+ Kernel.srand config.seed
95
+ =end
96
+ end
metadata CHANGED
@@ -2,7 +2,7 @@
2
2
  name: workflow_manager
3
3
  version: !ruby/object:Gem::Version
4
4
  prerelease:
5
- version: 0.2.5
5
+ version: 0.2.6
6
6
  platform: ruby
7
7
  authors:
8
8
  - Functional Genomics Center Zurich
@@ -10,7 +10,7 @@ autorequire:
10
10
  bindir: bin
11
11
  cert_chain: []
12
12
 
13
- date: 2015-10-15 00:00:00 Z
13
+ date: 2016-03-21 00:00:00 Z
14
14
  dependencies:
15
15
  - !ruby/object:Gem::Dependency
16
16
  name: bundler
@@ -53,6 +53,7 @@ extra_rdoc_files: []
53
53
 
54
54
  files:
55
55
  - .gitignore
56
+ - .rspec
56
57
  - Gemfile
57
58
  - LICENSE.txt
58
59
  - README.md
@@ -73,6 +74,9 @@ files:
73
74
  - lib/workflow_manager/optparse_ex.rb
74
75
  - lib/workflow_manager/server.rb
75
76
  - lib/workflow_manager/version.rb
77
+ - spec/cluster_spec.rb
78
+ - spec/server_spec.rb
79
+ - spec/spec_helper.rb
76
80
  - workflow_manager.gemspec
77
81
  homepage: ""
78
82
  licenses:
@@ -101,5 +105,7 @@ rubygems_version: 1.8.23
101
105
  signing_key:
102
106
  specification_version: 3
103
107
  summary: Workflow Manager manages job submissions using dRuby.
104
- test_files: []
105
-
108
+ test_files:
109
+ - spec/cluster_spec.rb
110
+ - spec/server_spec.rb
111
+ - spec/spec_helper.rb