scout-gear 8.0.0 → 9.0.0
- checksums.yaml +4 -4
- data/.vimproject +48 -9
- data/Rakefile +6 -1
- data/VERSION +1 -1
- data/bin/scout +16 -4
- data/doc/lib/scout/path.md +35 -0
- data/doc/lib/scout/workflow/task.md +13 -0
- data/lib/rbbt-scout.rb +2 -1
- data/lib/scout/cmd.rb +32 -29
- data/lib/scout/concurrent_stream.rb +36 -19
- data/lib/scout/exceptions.rb +10 -0
- data/lib/scout/indiferent_hash.rb +17 -0
- data/lib/scout/log/color.rb +11 -11
- data/lib/scout/log/progress/report.rb +8 -5
- data/lib/scout/log/progress/util.rb +3 -0
- data/lib/scout/log/trap.rb +3 -3
- data/lib/scout/log.rb +67 -36
- data/lib/scout/meta_extension.rb +34 -0
- data/lib/scout/misc/digest.rb +11 -2
- data/lib/scout/misc/filesystem.rb +2 -3
- data/lib/scout/misc/format.rb +12 -7
- data/lib/scout/misc/monitor.rb +11 -0
- data/lib/scout/misc/system.rb +48 -0
- data/lib/scout/named_array.rb +8 -0
- data/lib/scout/offsite/ssh.rb +174 -0
- data/lib/scout/offsite/step.rb +100 -0
- data/lib/scout/offsite/sync.rb +55 -0
- data/lib/scout/offsite.rb +3 -0
- data/lib/scout/open/lock.rb +5 -24
- data/lib/scout/open/remote.rb +12 -1
- data/lib/scout/open/stream.rb +109 -122
- data/lib/scout/open/util.rb +9 -0
- data/lib/scout/open.rb +12 -11
- data/lib/scout/path/find.rb +15 -10
- data/lib/scout/path/util.rb +5 -0
- data/lib/scout/path.rb +1 -1
- data/lib/scout/persist/serialize.rb +4 -4
- data/lib/scout/persist.rb +1 -1
- data/lib/scout/resource/open.rb +8 -0
- data/lib/scout/resource/path.rb +16 -9
- data/lib/scout/resource/software.rb +4 -2
- data/lib/scout/resource/util.rb +10 -4
- data/lib/scout/resource.rb +2 -0
- data/lib/scout/tsv/dumper.rb +5 -1
- data/lib/scout/tsv/index.rb +28 -86
- data/lib/scout/tsv/open.rb +35 -14
- data/lib/scout/tsv/parser.rb +22 -5
- data/lib/scout/tsv/persist/tokyocabinet.rb +2 -0
- data/lib/scout/tsv/stream.rb +204 -0
- data/lib/scout/tsv/transformer.rb +11 -0
- data/lib/scout/tsv.rb +9 -2
- data/lib/scout/work_queue/worker.rb +2 -2
- data/lib/scout/work_queue.rb +37 -12
- data/lib/scout/workflow/definition.rb +2 -1
- data/lib/scout/workflow/deployment/orchestrator.rb +254 -0
- data/lib/scout/workflow/deployment.rb +1 -0
- data/lib/scout/workflow/step/dependencies.rb +46 -14
- data/lib/scout/workflow/step/file.rb +5 -0
- data/lib/scout/workflow/step/info.rb +13 -3
- data/lib/scout/workflow/step/inputs.rb +5 -0
- data/lib/scout/workflow/step/load.rb +1 -1
- data/lib/scout/workflow/step/provenance.rb +1 -0
- data/lib/scout/workflow/step/status.rb +27 -9
- data/lib/scout/workflow/step.rb +82 -30
- data/lib/scout/workflow/task/dependencies.rb +116 -0
- data/lib/scout/workflow/task/inputs.rb +36 -17
- data/lib/scout/workflow/task.rb +12 -109
- data/lib/scout/workflow/usage.rb +57 -41
- data/lib/scout/workflow.rb +19 -13
- data/lib/scout-gear.rb +2 -0
- data/lib/scout.rb +6 -0
- data/scout-gear.gemspec +38 -7
- data/scout_commands/doc +37 -0
- data/scout_commands/find +1 -0
- data/scout_commands/offsite +30 -0
- data/scout_commands/resource/produce +66 -0
- data/scout_commands/template +52 -0
- data/scout_commands/update +29 -0
- data/scout_commands/workflow/info +15 -3
- data/scout_commands/workflow/install +105 -0
- data/scout_commands/workflow/task +46 -6
- data/share/software/install_helpers +2 -2
- data/share/templates/command +25 -0
- data/share/templates/workflow.rb +14 -0
- data/test/scout/offsite/test_ssh.rb +15 -0
- data/test/scout/offsite/test_step.rb +32 -0
- data/test/scout/offsite/test_sync.rb +36 -0
- data/test/scout/offsite/test_task.rb +0 -0
- data/test/scout/resource/test_path.rb +6 -0
- data/test/scout/test_named_array.rb +6 -0
- data/test/scout/test_persist.rb +3 -2
- data/test/scout/test_tsv.rb +17 -0
- data/test/scout/test_work_queue.rb +64 -42
- data/test/scout/tsv/persist/test_adapter.rb +1 -1
- data/test/scout/tsv/test_index.rb +14 -0
- data/test/scout/tsv/test_parser.rb +35 -0
- data/test/scout/tsv/test_stream.rb +200 -0
- data/test/scout/tsv/test_transformer.rb +12 -0
- data/test/scout/workflow/deployment/test_orchestrator.rb +272 -0
- data/test/scout/workflow/step/test_dependencies.rb +68 -0
- data/test/scout/workflow/step/test_info.rb +17 -0
- data/test/scout/workflow/step/test_status.rb +0 -1
- data/test/scout/workflow/task/test_dependencies.rb +357 -0
- data/test/scout/workflow/task/test_inputs.rb +52 -0
- data/test/scout/workflow/test_definition.rb +18 -0
- data/test/scout/workflow/test_documentation.rb +24 -0
- data/test/scout/workflow/test_step.rb +109 -0
- data/test/scout/workflow/test_task.rb +0 -287
- data/test/test_scout.rb +9 -0
- metadata +89 -5
- data/scout_commands/workflow/task_old +0 -706
data/lib/scout/tsv.rb
CHANGED
@@ -10,10 +10,11 @@ require_relative 'tsv/traverse'
 require_relative 'tsv/open'
 require_relative 'tsv/attach'
 require_relative 'tsv/change_id'
+require_relative 'tsv/stream'
 
 module TSV
   extend MetaExtension
-  extension_attr :key_field, :fields, :type, :filename, :namespace, :unnamed, :identifiers
+  extension_attr :key_field, :fields, :type, :cast, :filename, :namespace, :unnamed, :identifiers
 
   def self.str2options(str)
     field_options,_sep, rest = str.partition("#")
@@ -40,7 +41,13 @@ module TSV
     data = filename ? ScoutCabinet.open(filename, true, type) : nil
     options[:data] = data if data
     options[:filename] = file
-
+
+    if data
+      Log.debug "TSV open #{Log.fingerprint file} into #{Log.fingerprint data}"
+    else
+      Log.debug "TSV open #{Log.fingerprint file}"
+    end
+
     Open.open(file, grep: grep, invert_grep: invert_grep) do |f|
       TSV.parse(f, **options)
     end
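The `extension_attr` change above adds `:cast` to the attributes a TSV object carries around. A minimal sketch of what that looks like in use, assuming `TSV.setup` and the accessors generated by `extension_attr` behave as in earlier scout-gear releases:

```ruby
require 'scout/tsv'

# :cast is assumed to ride along like the other extension attributes
# (key_field, fields, type, ...); the reader below is generated by extension_attr.
tsv = TSV.setup({}, :key_field => "Gene", :fields => ["Score"],
                :type => :single, :cast => :to_f)

tsv.cast  #=> :to_f
```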
data/lib/scout/work_queue.rb
CHANGED
@@ -1,10 +1,12 @@
 require_relative 'work_queue/socket'
 require_relative 'work_queue/worker'
+require 'timeout'
 
 class WorkQueue
   attr_accessor :workers, :worker_proc, :callback
 
   def initialize(workers = 0, &block)
+    workers = workers.to_i if String === workers
     @input = WorkQueue::Socket.new
     @output = WorkQueue::Socket.new
     @workers = workers.times.collect{ Worker.new }
@@ -38,7 +40,7 @@ class WorkQueue
     @worker_mutex.synchronize do
       worker = @workers.index{|w| w.pid == pid}
       if worker
-        Log.
+        Log.low "Removed worker #{pid}"
         @workers.delete_at(worker)
         @removed_workers << pid
       end
@@ -88,19 +90,32 @@ class WorkQueue
 
     Thread.pass until @reader["name"]
 
+    Thread.pass until @worker_mutex.synchronize{ @workers.select{|w| w.pid.nil? }.empty? }
+
     @waiter = Thread.new do
-
-
-
-
-
-
-
+      Thread.current.report_on_exception = false
+      Thread.current["name"] = "Worker waiter #{Process.pid}"
+      while true
+        break if @worker_mutex.synchronize{ @workers.empty? }
+        begin
+          Timeout.timeout(1) do
+            begin
+              pid, status = Process.wait2
+              remove_worker(pid) if pid
+            rescue Exception
+              Log.exception $!
+            end
+          end
+        rescue Timeout::Error
+          pids = @worker_mutex.synchronize{ @workers.collect{|w| w.pid } }
+          pids.each do |p|
+            pid, status = Process.wait2 p, Process::WNOHANG
+            remove_worker(pid) if pid
+          end
         end
       end
    end
 
-    Thread.pass until @worker_mutex.synchronize{ @workers.select{|w| w.pid.nil? }.empty? }
     Thread.pass until @waiter["name"]
   end
 
@@ -128,8 +143,18 @@ class WorkQueue
     end
   end
 
-  def
-    @waiter.join if @waiter
-    @
+  def clean
+    @waiter.join if @waiter
+    @input.clean
+    @output.clean
+  end
+
+  def join(clean = true)
+    begin
+      @waiter.join if @waiter
+      @reader.join if @reader
+    ensure
+      self.clean if clean
+    end
   end
 end
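The reworked waiter thread above reaps finished workers with `Process.wait2` under a one-second `Timeout`, and the queue now has an explicit `clean`/`join(clean = true)` pair. A minimal shutdown sketch, assuming the queue is fed and closed as in previous releases (only `initialize`, `join`, and `clean` come from this diff):

```ruby
queue = WorkQueue.new("4") do |obj|   # String worker counts are now coerced with to_i
  obj.to_s.reverse                    # hypothetical per-item work
end

# ... write items to the queue and close its input here (not shown) ...

queue.join          # joins the waiter and reader threads, then cleans both sockets
# queue.join(false) # would skip the cleanup so the caller can call #clean later
```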
@@ -50,7 +50,7 @@ module Workflow
 
   def directory=(directory)
     @directory = directory
-    @tasks.each{|name,d| d.directory = directory[name] } if @tasks
+    @tasks.each{|name,d| d.directory = Path === directory ? directory[name] : File.join(directory, name.to_s) } if @tasks
   end
 
   def annotate_next_task(type, obj)
@@ -103,6 +103,7 @@ module Workflow
   def task(name_and_type, &block)
     name, type = name_and_type.collect.first
     @tasks ||= IndiferentHash.setup({})
+    block = self.method(name) if block.nil?
     begin
       @annotate_next_task ||= {}
       @annotate_next_task[:extension] ||=
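With `block = self.method(name) if block.nil?`, a task declared without a block now falls back to a module method of the same name. A hedged sketch, assuming the usual `extend Workflow` module pattern and the job API from the rest of scout-gear:

```ruby
module Greeter
  extend Workflow

  def self.salute
    "Hello"
  end

  task :salute => :string   # no block given: the body becomes self.method(:salute)
end

# Greeter.job(:salute).run  #=> "Hello"   (job/run API assumed, not shown in this diff)
```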
@@ -0,0 +1,254 @@
+module Workflow
+  class Orchestrator
+
+    def self.job_workload(job)
+      workload = {job => []}
+      return workload if job.done? && job.updated?
+
+      job.dependencies.each do |dep|
+        next if dep.done? && dep.updated?
+        workload.merge!(job_workload(dep))
+        workload[job] += workload[dep]
+        workload[job] << dep
+        workload[job].uniq!
+      end
+
+      job.input_dependencies.each do |dep|
+        next if dep.done? && dep.updated?
+        workload.merge!(job_workload(dep))
+        workload[job] += workload[dep]
+        workload[job] << dep
+        workload[job].uniq!
+      end
+
+      workload
+    end
+
+    def self.workload(jobs)
+      jobs.inject({}) do |acc,job|
+        Orchestrator.job_workload(job).each do |j,d|
+          acc[j] = d unless acc.keys.collect{|k| k.path }.include? j.path
+        end
+        acc
+      end
+    end
+
+    def self.job_rules(rules, job)
+      workflow = job.workflow.to_s
+      task_name = job.task_name.to_s
+      defaults = rules["defaults"] || {}
+
+      return IndiferentHash.setup(defaults) unless rules[workflow]
+      return IndiferentHash.setup(defaults) unless rules[workflow][task_name]
+
+      job_rules = IndiferentHash.setup(rules[workflow][task_name])
+      defaults.each{|k,v| job_rules[k] = v if job_rules[k].nil? } if defaults
+      job_rules
+    end
+
+    def self.purge_duplicates(candidates)
+      seen = Set.new
+      candidates.select do |job|
+        if seen.include? job.path
+          false
+        else
+          seen << job.path
+          true
+        end
+      end
+    end
+
+    def self.job_resources(rules, job)
+      resources = (job_rules(rules, job) || {})["resources"] || {}
+
+      IndiferentHash.setup(resources)
+
+      default_resources = rules["default_resources"]
+      default_resources ||= rules["defaults"]["resources"] if rules["defaults"]
+      default_resources ||= {}
+
+      default_resources.each{|k,v| resources[k] ||= v } if default_resources
+
+      resources = {:cpus => 1} if resources.empty?
+      resources
+    end
+
+    def self.sort_candidates(candidates, rules)
+      seen = Set.new
+      candidates.sort_by do |job|
+        - job_resources(rules, job).values.inject(0){|acc,e| acc += e}
+      end
+    end
+
+    def self.candidates(workload, rules)
+      if rules.empty?
+        candidates = workload.
+          select{|k,v| v.empty? }.
+          collect{|k,v| k }.
+          reject{|k| k.done? || k.running? }
+      else
+        candidates = workload. #select{|k,v| Orchestrator.job_rules(rules, k) }.
+          select{|k,v| v.empty? }.
+          collect{|k,v| k }.
+          reject{|k| k.done? || k.running? }
+      end
+
+      #top_level = workload.keys - workload.values.flatten
+
+      candidates = purge_duplicates candidates
+      candidates = sort_candidates candidates, rules
+
+      candidates
+    end
+
+    def self.process(*args)
+      self.new.process(*args)
+    end
+
+    attr_accessor :available_resources, :resources_requested, :resources_used, :timer
+
+    def initialize(timer = 5, available_resources = {})
+      available_resources = {:cpus => Etc.nprocessors } if available_resources.nil?
+      @timer = timer
+      @available_resources = IndiferentHash.setup(available_resources)
+      @resources_requested = IndiferentHash.setup({})
+      @resources_used = IndiferentHash.setup({})
+    end
+
+    def release_resources(job)
+      if resources_used[job]
+        Log.debug "Orchestrator releasing resouces from #{job.path}"
+        resources_used[job].each do |resource,value|
+          next if resource == 'size'
+          resources_requested[resource] -= value.to_i
+        end
+        resources_used.delete job
+      end
+    end
+
+    def check_resources(rules, job)
+      resources = Orchestrator.job_resources(rules, job)
+
+      limit_resources = resources.select{|resource,value| available_resources[resource] && ((resources_requested[resource] || 0) + value) > available_resources[resource] }.collect{|resource,v| resource }
+      if limit_resources.any?
+        Log.debug "Orchestrator waiting on #{job.path} due to #{limit_resources * ", "}"
+      else
+
+        resources_used[job] = resources
+        resources.each do |resource,value|
+          resources_requested[resource] ||= 0
+          resources_requested[resource] += value.to_i
+        end
+        Log.low "Orchestrator producing #{job.path} with resources #{resources}"
+
+        return yield
+      end
+    end
+
+    def run_with_rules(rules, job)
+      job_rules = Orchestrator.job_rules(rules, job)
+
+      Scout::Config.with_config do
+        job_rules[:config_keys].each do |config|
+          Scout::Config.process_config config
+        end if job_rules && job_rules[:config_keys]
+
+        log = job_rules[:log] if job_rules
+        log = Log.severity if log.nil?
+        Log.with_severity log do
+          job.fork
+        end
+      end
+    end
+
+    def erase_job_dependencies(job, rules, all_jobs, top_level_jobs)
+      job.dependencies.each do |dep|
+        next if top_level_jobs.include? dep.path
+        next unless Orchestrator.job_rules(rules, dep)["erase"].to_s == 'true'
+
+        dep_path = dep.path
+        parents = all_jobs.select do |parent|
+          paths = parent.info[:dependencies].nil? ? parent.dependencies.collect{|d| d.path } : parent.info[:dependencies].collect{|d| d.last }
+          paths.include? dep_path
+        end
+
+        next unless parents.reject{|parent| parent.done? }.empty?
+
+        parents.each do |parent|
+          Log.high "Erasing #{dep.path} from #{parent.path}"
+          parent.archive_deps
+          parent.copy_files_dir
+          parent.dependencies = parent.dependencies - [dep]
+        end
+        dep.clean
+      end
+    end
+
+    def process(rules, jobs = nil)
+      jobs, rules = rules, {} if jobs.nil?
+      jobs = [jobs] if Step === jobs
+      begin
+
+        workload = Orchestrator.workload(jobs)
+        all_jobs = workload.keys
+
+        all_jobs.each{|job| job.clean unless job.done? && job.updated? }
+
+        top_level_jobs = jobs.collect{|job| job.path }
+        failed_jobs = []
+        while workload.any?
+
+          candidates = resources_used.keys + Orchestrator.candidates(workload, rules)
+          candidates.uniq!
+          raise "No candidates and no running jobs" if candidates.empty?
+
+          candidates.each do |job|
+            case
+            when (job.error? || job.aborted?)
+              begin
+                if job.recoverable_error?
+                  if failed_jobs.include?(job)
+                    Log.warn "Failed twice #{job.path} with recoverable error"
+                    next
+                  else
+                    failed_jobs << job
+                    job.clean
+                    raise TryAgain
+                  end
+                else
+                  next
+                end
+              ensure
+                Log.warn "Releases resources from failed job: #{job.path}"
+                release_resources(job)
+              end
+            when job.done?
+              Log.debug "Orchestrator done #{job.path}"
+              release_resources(job)
+              erase_job_dependencies(job, rules, all_jobs, top_level_jobs)
+
+            when job.running?
+              next
+
+            else
+              check_resources(rules, job) do
+                run_with_rules(rules, job)
+              end
+            end
+          end
+
+          new_workload = {}
+          workload.each do |k,v|
+            next if k.done? || k.error? || k.aborted?
+            #new_workload[k] = v.reject{|d| d.done? || ((d.error? || d.aborted?) && ! d.recoverable_error?)}
+            new_workload[k] = v.reject{|d| d.done? || d.error? || d.aborted?}
+          end
+          workload = new_workload
+          sleep timer
+        end
+      rescue TryAgain
+        retry
+      end
+    end
+  end
+end
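The new orchestrator runs a set of jobs while respecting per-task resource rules, laid out as workflow → task → resources with `defaults`/`default_resources` fallbacks. A hedged usage sketch; the `Baking` workflow and its task are hypothetical, only the rules layout and the `Workflow::Orchestrator` API come from the code above:

```ruby
require 'scout/workflow'

rules = {
  "defaults" => { "resources" => { "cpus" => 1 } },
  "Baking"   => { "bake_muffin_tray" => { "resources" => { "cpus" => 4 } } },
}

jobs = [Baking.job(:bake_muffin_tray, "batch1")]   # hypothetical workflow and job

orchestrator = Workflow::Orchestrator.new(5, "cpus" => 8)  # poll every 5 s, cap at 8 cpus
orchestrator.process(rules, jobs)

# Workflow::Orchestrator.process(jobs) runs the same loop with no rules and no caps.
```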
@@ -0,0 +1 @@
+require_relative 'deployment/orchestrator'
@@ -1,7 +1,13 @@
 class Step
-  def rec_dependencies
-
-    dependencies.
+  def rec_dependencies(connected = false, seen = [])
+    direct_deps = []
+    dependencies.each do |dep|
+      next if seen.include? dep.path
+      next if connected && dep.done? && dep.updated?
+      direct_deps << dep
+    end
+    seen.concat direct_deps.collect{|d| d.path }
+    direct_deps.inject(direct_deps){|acc,d| acc.concat(d.rec_dependencies(connected, [])); acc }
   end
 
   def recursive_inputs
@@ -13,15 +19,21 @@ class Step
 
   def input_dependencies
     return [] unless inputs
-    inputs.
-      Step === d
-
+    inputs.collect do |d|
+      if Step === d
+        d
+      elsif (Path === d) && (Step === d.pkgdir)
+        d.pkgdir
+      else
+        nil
+      end
+    end.compact.uniq
   end
 
   def prepare_dependencies
     inverse_dep = {}
-    dependencies.each{|dep|
-      if dep.present? && ! dep.updated?
+    dependencies.each{|dep|
+      if dep.present? && ! dep.updated?
         Log.debug "Clean outdated #{dep.path}"
         dep.clean
       end
@@ -29,12 +41,12 @@ class Step
       if dep.dependencies
        dep.dependencies.each do |d|
          inverse_dep[d] ||= []
-          inverse_dep[d] << dep
+          inverse_dep[d] << dep
        end
      end
      input_dependencies.each do |d|
        inverse_dep[d] ||= []
-        inverse_dep[d] << dep
+        inverse_dep[d] << dep
      end
    }
    inverse_dep.each do |dep,list|
@@ -43,7 +55,25 @@ class Step
   end
 
   def run_dependencies
-    dependencies.each{|dep|
+    dependencies.each{|dep|
+      next if dep.running? || dep.done?
+      compute_options = compute[dep.path] if compute
+      compute_options = [] if compute_options.nil?
+
+      stream = compute_options.include?(:stream)
+      stream = true unless ENV["SCOUT_EXPLICIT_STREAMING"] == 'true'
+      stream = false if compute_options.include?(:produce)
+
+      begin
+        dep.run(stream)
+      rescue ScoutException
+        if compute_options.include?(:canfail)
+          Log.medium "Allow failing of #{dep.path}"
+        else
+          raise $!
+        end
+      end
+    }
   end
 
   def abort_dependencies
@@ -52,9 +82,11 @@ class Step
 
   def self.wait_for_jobs(jobs)
     threads = []
-    jobs.each do |job|
-      threads << job.join
+    jobs.each do |job|
+      threads << Thread.new{ job.join }
+    end
+    threads.each do |t|
+      t.join
     end
-    threads.each do |t| t.join end
   end
 end
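`run_dependencies` now decides per dependency whether to run it in streaming mode, driven by the `:stream`/`:produce` compute options and the `SCOUT_EXPLICIT_STREAMING` environment variable, while `:canfail` downgrades a `ScoutException` in a dependency to a log message. A minimal sketch that reproduces just the streaming decision as a standalone helper (the helper itself is not part of scout-gear):

```ruby
def streaming?(compute_options)
  stream = compute_options.include?(:stream)
  stream = true unless ENV["SCOUT_EXPLICIT_STREAMING"] == 'true'
  stream = false if compute_options.include?(:produce)
  stream
end

streaming?([])                  #=> true, streaming is the default
ENV["SCOUT_EXPLICIT_STREAMING"] = 'true'
streaming?([])                  #=> false, only explicitly marked deps stream
streaming?([:stream])           #=> true
streaming?([:stream, :produce]) #=> false, :produce always wins
```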
@@ -3,6 +3,7 @@ class Step
     @files_dir ||= begin
       dir = @path + ".files"
       @path.annotate(dir) if Path === @path
+      dir.pkgdir = self
       dir
     end
   end
@@ -12,4 +13,8 @@ class Step
     Path.setup(dir) unless Path === dir
     dir[file]
   end
+
+  def bundle_files
+    [path, info_file, Dir.glob(File.join(files_dir,"**/*"))].flatten.select{|f| Open.exist?(f) }
+  end
 end
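`bundle_files` gathers every on-disk artifact of a step: its result, its info file, and anything under the `.files` directory. A hedged sketch; `MyWorkflow` and its task are hypothetical:

```ruby
job = MyWorkflow.job(:some_task)   # hypothetical workflow and task
job.run

job.bundle_files
#=> [job.path, job.info_file, *files under job.files_dir], keeping only paths that exist
```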
@@ -43,7 +43,7 @@ class Step
     new_info.each do |key,value|
       if key == :status
         message = new_info[:messages]
-        if message.nil? && value == :done || value == :error || value == :aborted
+        if message.nil? && (value == :done || value == :error || value == :aborted)
           start = info[:start]
           eend = new_info[:end]
           if start && eend
@@ -54,6 +54,7 @@ class Step
         end
         report_status value, message
       end
+
       if Exception === value
         begin
           Marshal.dump(value)
@@ -67,6 +68,7 @@ class Step
           value = new
         end
       end
+
       if info.include?(key)
         case info[key]
         when Array
@@ -114,17 +116,25 @@ class Step
   end
 
   def error?
-    status == :error
+    status == :error || status == 'error'
   end
 
   def aborted?
-    status == :aborted
+    status == :aborted || status == 'aborted'
   end
 
   def running?
     ! done? && (info[:pid] && Misc.pid_alive?(info[:pid]))
   end
 
+  def overriden?
+    overriden_task || overriden_workflow || dependencies.select{|d| d.overriden? }.any?
+  end
+
+  def overriden_deps
+    rec_dependencies.select{|d| d.overriden? }
+  end
+
   def exception
     info[:exception]
   end
@@ -91,6 +91,7 @@ class Step
     info[:task_name] = task
     path = step.path
     status = info[:status] || :missing
+    status = status.to_sym if String === status
     status = :noinfo if status == :missing && Open.exist?(path)
     status = "remote" if Open.remote?(path) || Open.ssh?(path)
     name = info[:name] || File.basename(path)
@@ -1,7 +1,11 @@
 class Step
   def abort(exception = nil)
-
-
+    if (pid = info[:pid]) && pid != Process.pid && Misc.pid_alive?(pid)
+      Process.kill pid
+    else
+      while @result && streaming? && stream = self.stream
+        stream.abort(exception)
+      end
     end
   end
 
@@ -11,7 +15,7 @@ class Step
 
   def updated?
     return false if self.error? && self.recoverable_error?
-    return true
+    return true if self.done? && ! ENV["SCOUT_UPDATE"]
     newer = rec_dependencies.select{|dep| Path.newer?(self.path, dep.path) }
     newer += input_dependencies.select{|dep| Path.newer?(self.path, dep.path) }
 
@@ -24,16 +28,11 @@ class Step
     @info = nil
     @info_load_time = nil
     Open.rm path if Open.exist?(path)
+    Open.rm tmp_path if Open.exist?(tmp_path)
     Open.rm info_file if Open.exist?(info_file)
     Open.rm_rf files_dir if Open.exist?(files_dir)
   end
 
-  def present?
-    Open.exist?(path) &&
-      Open.exist?(info_file) &&
-      Open.exist?(files_dir)
-  end
-
 
   def recursive_clean
     dependencies.each do |dep|
@@ -42,4 +41,23 @@ class Step
     clean
   end
 
+  def canfail?
+    @compute && @compute.include?(:canfail)
+  end
+
+  def started?
+    return true if done?
+    return false unless Open.exist?(info_file)
+    pid = info[:pid]
+    return false unless pid
+    return Misc.pid_alive?(pid)
+  end
+
+  def waiting?
+    present? and not started?
+  end
+
+  def dirty?
+    done? && ! updated?
+  end
 end
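The new status predicates round out the lifecycle queries on `Step`, and `updated?` now honors the `SCOUT_UPDATE` environment variable. A hedged sketch; `MyWorkflow` is hypothetical and `Workflow#job` is assumed from the rest of scout-gear:

```ruby
job = MyWorkflow.job(:some_task)   # hypothetical workflow and task

job.started?   # done, or the info file records a pid that is still alive
job.waiting?   # files are present but no live process has started on them
job.dirty?     # done but no longer considered up to date by #updated?

# With SCOUT_UPDATE set, a finished job is re-checked against its dependencies
# in #updated? instead of being accepted as-is.
ENV["SCOUT_UPDATE"] = "true"
job.updated?
```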