scout-gear 7.3.0 → 8.1.0
- checksums.yaml +4 -4
- data/.vimproject +44 -16
- data/Rakefile +6 -1
- data/VERSION +1 -1
- data/bin/scout +21 -7
- data/doc/lib/scout/path.md +35 -0
- data/doc/lib/scout/workflow/task.md +13 -0
- data/lib/rbbt-scout.rb +1 -0
- data/lib/scout/cmd.rb +24 -25
- data/lib/scout/concurrent_stream.rb +59 -39
- data/lib/scout/config.rb +1 -1
- data/lib/scout/exceptions.rb +10 -0
- data/lib/scout/log/color.rb +15 -12
- data/lib/scout/log/progress/report.rb +8 -6
- data/lib/scout/log/progress/util.rb +61 -54
- data/lib/scout/log/progress.rb +1 -1
- data/lib/scout/log/trap.rb +107 -0
- data/lib/scout/log.rb +115 -52
- data/lib/scout/meta_extension.rb +47 -6
- data/lib/scout/misc/digest.rb +12 -3
- data/lib/scout/misc/format.rb +24 -7
- data/lib/scout/misc/insist.rb +1 -1
- data/lib/scout/misc/monitor.rb +22 -0
- data/lib/scout/misc/system.rb +58 -0
- data/lib/scout/named_array.rb +73 -3
- data/lib/scout/offsite/ssh.rb +171 -0
- data/lib/scout/offsite/step.rb +83 -0
- data/lib/scout/offsite/sync.rb +55 -0
- data/lib/scout/offsite.rb +3 -0
- data/lib/scout/open/lock/lockfile.rb +587 -0
- data/lib/scout/open/lock.rb +9 -2
- data/lib/scout/open/remote.rb +16 -1
- data/lib/scout/open/stream.rb +146 -83
- data/lib/scout/open/util.rb +22 -3
- data/lib/scout/open.rb +5 -4
- data/lib/scout/path/find.rb +24 -11
- data/lib/scout/path/util.rb +40 -0
- data/lib/scout/persist/serialize.rb +19 -6
- data/lib/scout/persist.rb +29 -13
- data/lib/scout/resource/path.rb +57 -0
- data/lib/scout/resource/produce.rb +0 -8
- data/lib/scout/resource/util.rb +12 -5
- data/lib/scout/tmpfile.rb +7 -8
- data/lib/scout/tsv/attach.rb +177 -0
- data/lib/scout/tsv/change_id.rb +40 -0
- data/lib/scout/tsv/dumper.rb +74 -46
- data/lib/scout/tsv/index.rb +85 -87
- data/lib/scout/tsv/open.rb +160 -85
- data/lib/scout/tsv/parser.rb +142 -80
- data/lib/scout/tsv/path.rb +1 -2
- data/lib/scout/tsv/persist/adapter.rb +15 -45
- data/lib/scout/tsv/persist/fix_width_table.rb +3 -0
- data/lib/scout/tsv/persist/tokyocabinet.rb +6 -1
- data/lib/scout/tsv/persist.rb +4 -0
- data/lib/scout/tsv/stream.rb +204 -0
- data/lib/scout/tsv/transformer.rb +152 -0
- data/lib/scout/tsv/traverse.rb +96 -92
- data/lib/scout/tsv/util/filter.rb +9 -0
- data/lib/scout/tsv/util/reorder.rb +81 -0
- data/lib/scout/tsv/util/select.rb +78 -33
- data/lib/scout/tsv/util/unzip.rb +86 -0
- data/lib/scout/tsv/util.rb +60 -11
- data/lib/scout/tsv.rb +34 -4
- data/lib/scout/work_queue/socket.rb +6 -1
- data/lib/scout/work_queue/worker.rb +5 -2
- data/lib/scout/work_queue.rb +51 -20
- data/lib/scout/workflow/definition.rb +23 -3
- data/lib/scout/workflow/deployment/orchestrator.rb +245 -0
- data/lib/scout/workflow/deployment.rb +1 -0
- data/lib/scout/workflow/step/dependencies.rb +56 -10
- data/lib/scout/workflow/step/file.rb +5 -0
- data/lib/scout/workflow/step/info.rb +40 -7
- data/lib/scout/workflow/step/load.rb +1 -1
- data/lib/scout/workflow/step/provenance.rb +9 -7
- data/lib/scout/workflow/step/status.rb +43 -0
- data/lib/scout/workflow/step.rb +160 -49
- data/lib/scout/workflow/task/dependencies.rb +114 -0
- data/lib/scout/workflow/task/inputs.rb +40 -32
- data/lib/scout/workflow/task.rb +38 -102
- data/lib/scout/workflow/usage.rb +48 -18
- data/lib/scout/workflow.rb +4 -2
- data/lib/scout-gear.rb +2 -0
- data/lib/scout.rb +6 -0
- data/scout-gear.gemspec +52 -23
- data/scout_commands/doc +37 -0
- data/scout_commands/find +1 -0
- data/scout_commands/offsite +30 -0
- data/scout_commands/update +29 -0
- data/scout_commands/workflow/info +15 -3
- data/scout_commands/workflow/install +102 -0
- data/scout_commands/workflow/task +57 -9
- data/test/scout/offsite/test_ssh.rb +15 -0
- data/test/scout/offsite/test_step.rb +33 -0
- data/test/scout/offsite/test_sync.rb +36 -0
- data/test/scout/offsite/test_task.rb +0 -0
- data/test/scout/open/test_stream.rb +60 -58
- data/test/scout/path/test_find.rb +10 -1
- data/test/scout/resource/test_path.rb +6 -0
- data/test/scout/resource/test_produce.rb +15 -0
- data/test/scout/test_meta_extension.rb +25 -0
- data/test/scout/test_named_array.rb +24 -0
- data/test/scout/test_persist.rb +9 -2
- data/test/scout/test_tsv.rb +229 -2
- data/test/scout/test_work_queue.rb +65 -41
- data/test/scout/tsv/persist/test_tokyocabinet.rb +29 -1
- data/test/scout/tsv/test_attach.rb +227 -0
- data/test/scout/tsv/test_change_id.rb +98 -0
- data/test/scout/tsv/test_dumper.rb +1 -1
- data/test/scout/tsv/test_index.rb +49 -3
- data/test/scout/tsv/test_open.rb +160 -2
- data/test/scout/tsv/test_parser.rb +33 -2
- data/test/scout/tsv/test_persist.rb +2 -0
- data/test/scout/tsv/test_stream.rb +200 -0
- data/test/scout/tsv/test_transformer.rb +120 -0
- data/test/scout/tsv/test_traverse.rb +88 -3
- data/test/scout/tsv/test_util.rb +1 -0
- data/test/scout/tsv/util/test_reorder.rb +94 -0
- data/test/scout/tsv/util/test_select.rb +25 -11
- data/test/scout/tsv/util/test_unzip.rb +112 -0
- data/test/scout/work_queue/test_socket.rb +0 -1
- data/test/scout/workflow/deployment/test_orchestrator.rb +272 -0
- data/test/scout/workflow/step/test_dependencies.rb +68 -0
- data/test/scout/workflow/step/test_info.rb +18 -0
- data/test/scout/workflow/step/test_status.rb +30 -0
- data/test/scout/workflow/task/test_dependencies.rb +355 -0
- data/test/scout/workflow/task/test_inputs.rb +67 -14
- data/test/scout/workflow/test_definition.rb +18 -0
- data/test/scout/workflow/test_documentation.rb +24 -0
- data/test/scout/workflow/test_step.rb +112 -3
- data/test/scout/workflow/test_task.rb +0 -151
- data/test/scout/workflow/test_usage.rb +33 -6
- data/test/test_scout.rb +9 -0
- metadata +100 -8
- data/scout_commands/workflow/task_old +0 -706

data/lib/scout/workflow/deployment/orchestrator.rb
@@ -0,0 +1,245 @@
+module Workflow
+  class Orchestrator
+
+    def self.job_workload(job)
+      workload = {job => []}
+      return workload if job.done? && job.updated?
+
+      job.dependencies.each do |dep|
+        next if dep.done? && job.updated?
+        workload.merge!(job_workload(dep))
+        workload[job] += workload[dep]
+        workload[job] << dep
+        workload[job].uniq!
+      end
+
+      job.input_dependencies.each do |dep|
+        next if dep.done? && job.updated?
+        workload.merge!(job_workload(dep))
+        workload[job] += workload[dep]
+        workload[job] << dep
+        workload[job].uniq!
+      end
+
+      workload
+    end
+
+    def self.workload(jobs)
+      jobs.inject({}) do |acc,job|
+        Orchestrator.job_workload(job).each do |j,d|
+          acc[j] = d unless acc.keys.collect{|k| k.path }.include? j.path
+        end
+        acc
+      end
+    end
+
+    def self.job_rules(rules, job)
+      workflow = job.workflow.to_s
+      task_name = job.task_name.to_s
+      defaults = rules["defaults"] || {}
+
+      return IndiferentHash.setup(defaults) unless rules[workflow]
+      return IndiferentHash.setup(defaults) unless rules[workflow][task_name]
+
+      job_rules = IndiferentHash.setup(rules[workflow][task_name])
+      defaults.each{|k,v| job_rules[k] = v if job_rules[k].nil? } if defaults
+      job_rules
+    end
+
+    def self.purge_duplicates(candidates)
+      seen = Set.new
+      candidates.select do |job|
+        if seen.include? job.path
+          false
+        else
+          seen << job.path
+          true
+        end
+      end
+    end
+
+    def self.job_resources(rules, job)
+      resources = (job_rules(rules, job) || {})["resources"] || {}
+
+      IndiferentHash.setup(resources)
+
+      default_resources = rules["default_resources"]
+      default_resources ||= rules["defaults"]["resources"] if rules["defaults"]
+      default_resources ||= {}
+
+      default_resources.each{|k,v| resources[k] ||= v } if default_resources
+
+      resources = {:cpus => 1} if resources.empty?
+      resources
+    end
+
+    def self.sort_candidates(candidates, rules)
+      seen = Set.new
+      candidates.sort_by do |job|
+        - job_resources(rules, job).values.inject(0){|acc,e| acc += e}
+      end
+    end
+
+    def self.candidates(workload, rules)
+      if rules.empty?
+        candidates = workload.
+          select{|k,v| v.empty? }.
+          collect{|k,v| k }.
+          reject{|k| k.done? }
+      else
+        candidates = workload. #select{|k,v| Orchestrator.job_rules(rules, k) }.
+          select{|k,v| v.empty? }.
+          collect{|k,v| k }.
+          reject{|k| k.done? }
+      end
+
+      top_level = workload.keys - workload.values.flatten
+
+      candidates = purge_duplicates candidates
+      candidates = sort_candidates candidates, rules
+
+      candidates
+    end
+
+    def self.process(*args)
+      self.new.process(*args)
+    end
+
+    attr_accessor :available_resources, :resources_requested, :resources_used, :timer
+
+    def initialize(timer = 5, available_resources = {})
+      available_resources = {:cpus => Etc.nprocessors } if available_resources.nil?
+      @timer = timer
+      @available_resources = IndiferentHash.setup(available_resources)
+      @resources_requested = IndiferentHash.setup({})
+      @resources_used = IndiferentHash.setup({})
+    end
+
+    def release_resources(job)
+      if resources_used[job]
+        Log.debug "Orchestrator releasing resouces from #{job.path}"
+        resources_used[job].each do |resource,value|
+          next if resource == 'size'
+          resources_requested[resource] -= value.to_i
+        end
+        resources_used.delete job
+      end
+    end
+
+    def check_resources(rules, job)
+      resources = Orchestrator.job_resources(rules, job)
+
+      limit_resources = resources.select{|resource,value| available_resources[resource] && ((resources_requested[resource] || 0) + value) > available_resources[resource] }.collect{|resource,v| resource }
+      if limit_resources.any?
+        Log.debug "Orchestrator waiting on #{job.path} due to #{limit_resources * ", "}"
+      else
+
+        resources_used[job] = resources
+        resources.each do |resource,value|
+          resources_requested[resource] ||= 0
+          resources_requested[resource] += value.to_i
+        end
+        Log.low "Orchestrator producing #{job.path} with resources #{resources}"
+
+        return yield
+      end
+    end
+
+    def run_with_rules(rules, job)
+      job_rules = Orchestrator.job_rules(rules, job)
+
+      Scout::Config.with_config do
+        job_rules[:config_keys].each do |config|
+          Scout::Config.process_config config
+        end if job_rules && job_rules[:config_keys]
+
+        log = job_rules[:log] if job_rules
+        log = Log.severity if log.nil?
+        Log.with_severity log do
+          job.fork
+        end
+      end
+    end
+
+    def erase_job_dependencies(job, rules, all_jobs, top_level_jobs)
+      job.dependencies.each do |dep|
+        next if top_level_jobs.include? dep.path
+        next unless Orchestrator.job_rules(rules, dep)["erase"].to_s == 'true'
+
+        dep_path = dep.path
+        parents = all_jobs.select do |parent|
+          paths = parent.info[:dependencies].nil? ? parent.dependencies.collect{|d| d.path } : parent.info[:dependencies].collect{|d| d.last }
+          paths.include? dep_path
+        end
+
+        next unless parents.reject{|parent| parent.done? }.empty?
+
+        parents.each do |parent|
+          Log.high "Erasing #{dep.path} from #{parent.path}"
+          parent.archive_deps
+          parent.copy_files_dir
+          parent.dependencies = parent.dependencies - [dep]
+        end
+        dep.clean
+      end
+    end
+
+    def process(rules, jobs = nil)
+      jobs, rules = rules, {} if jobs.nil?
+      jobs = [jobs] if Step === jobs
+      begin
+
+        workload = Orchestrator.workload(jobs)
+        all_jobs = workload.keys
+
+        top_level_jobs = jobs.collect{|job| job.path }
+        while workload.any?
+
+          candidates = resources_used.keys + Orchestrator.candidates(workload, rules)
+          candidates.uniq!
+          raise "No candidates and no running jobs" if candidates.empty?
+
+          candidates.each do |job|
+            case
+            when (job.error? || job.aborted?)
+              begin
+                if job.recoverable_error?
+                  job.clean
+                  raise TryAgain
+                else
+                  next
+                end
+              ensure
+                Log.warn "Releases resources from failed job: #{job.path}"
+                release_resources(job)
+              end
+            when job.done?
+              Log.debug "Orchestrator done #{job.path}"
+              release_resources(job)
+              erase_job_dependencies(job, rules, all_jobs, top_level_jobs)
+
+            when job.running?
+              next
+
+            else
+              check_resources(rules, job) do
+                run_with_rules(rules, job)
+              end
+            end
+          end
+
+          new_workload = {}
+          workload.each do |k,v|
+            next if k.done? || k.error? || k.aborted?
+            #new_workload[k] = v.reject{|d| d.done? || ((d.error? || d.aborted?) && ! d.recoverable_error?)}
+            new_workload[k] = v.reject{|d| d.done? || d.error? || d.aborted?}
+          end
+          workload = new_workload
+          sleep timer
+        end
+      rescue TryAgain
+        retry
+      end
+    end
+  end
+end

data/lib/scout/workflow/deployment.rb
@@ -0,0 +1 @@
+require_relative 'deployment/orchestrator'

data/lib/scout/workflow/step/dependencies.rb
@@ -1,31 +1,46 @@
 class Step
+  def rec_dependencies
+    rec_dependencies = dependencies.dup
+    dependencies.inject(rec_dependencies){|acc,d| acc.concat d.rec_dependencies }
+  end
+
   def recursive_inputs
-
-
-    acc
+    recursive_inputs = @inputs.to_hash
+    dependencies.inject(recursive_inputs) do |acc,dep|
+      acc.merge(dep.recursive_inputs)
     end
   end
 
   def input_dependencies
     return [] unless inputs
-    inputs.
-      Step === d
-
+    inputs.collect do |d|
+      if Step === d
+        d
+      elsif (Path === d) && (Step === d.pkgdir)
+        d.pkgdir
+      else
+        nil
+      end
+    end.compact.uniq
   end
 
   def prepare_dependencies
     inverse_dep = {}
-    dependencies.each{|dep|
+    dependencies.each{|dep|
+      if dep.present? && ! dep.updated?
+        Log.debug "Clean outdated #{dep.path}"
+        dep.clean
+      end
       next if dep.done?
       if dep.dependencies
         dep.dependencies.each do |d|
           inverse_dep[d] ||= []
-          inverse_dep[d] << dep
+          inverse_dep[d] << dep
         end
       end
       input_dependencies.each do |d|
         inverse_dep[d] ||= []
-        inverse_dep[d] << dep
+        inverse_dep[d] << dep
       end
     }
     inverse_dep.each do |dep,list|
@@ -34,7 +49,38 @@ class Step
   end
 
   def run_dependencies
-    dependencies.each{|dep|
+    dependencies.each{|dep|
+      next if dep.running? || dep.done?
+      compute_options = compute[dep.path] if compute
+      compute_options = [] if compute_options.nil?
+
+      stream = compute_options.include?(:stream)
+      stream = true unless ENV["SCOUT_EXPLICIT_STREAMING"] == 'true'
+      stream = false if compute_options.include?(:produce)
+
+      begin
+        dep.run(stream)
+      rescue ScoutException
+        if compute_options.include?(:canfail)
+          Log.medium "Allow failing of #{dep.path}"
+        else
+          raise $!
+        end
+      end
+    }
+  end
+
+  def abort_dependencies
+    dependencies.each{|dep| dep.abort if dep.running? }
   end
 
+  def self.wait_for_jobs(jobs)
+    threads = []
+    jobs.each do |job|
+      threads << Thread.new{ job.join }
+    end
+    threads.each do |t|
+      t.join
+    end
+  end
 end
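
An aside (not part of the diff): run_dependencies above decides per dependency whether to stream. Streaming defaults to on unless SCOUT_EXPLICIT_STREAMING=true, an explicit :produce option turns it off, and :canfail lets a dependency raise ScoutException without aborting the parent. The sketch below exercises the new Step.wait_for_jobs helper; the workflow and task names are hypothetical.

# Hypothetical usage sketch: fork several jobs and block until all of them finish.
jobs = %w(sample1 sample2 sample3).collect do |name|
  MyWorkflow.job(:heavy_task, name).fork   # made-up workflow/task
end
Step.wait_for_jobs(jobs)                   # joins each job in its own thread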

data/lib/scout/workflow/step/file.rb
@@ -3,6 +3,7 @@ class Step
     @files_dir ||= begin
       dir = @path + ".files"
       @path.annotate(dir) if Path === @path
+      dir.pkgdir = self
       dir
     end
   end
@@ -12,4 +13,8 @@ class Step
     Path.setup(dir) unless Path === dir
     dir[file]
   end
+
+  def bundle_files
+    [path, info_file, Dir.glob(File.join(files_dir,"**/*"))].flatten.select{|f| Open.exist?(f) }
+  end
 end

data/lib/scout/workflow/step/info.rb
@@ -1,6 +1,7 @@
 class Step
   SERIALIZER = :marshal
   def info_file
+    return nil if @path.nil?
     @info_file ||= begin
       info_file = @path + ".info"
       @path.annotate info_file if Path === @path
@@ -19,6 +20,10 @@ class Step
     @info_load_time = Time.now
   end
 
+  def clear_info
+    save_info(@info = {})
+  end
+
   def info
     outdated = begin
       @info_load_time && (mtime = Open.mtime(info_file)) && mtime > @info_load_time
@@ -36,7 +41,20 @@ class Step
   def merge_info(new_info)
     info = self.info
     new_info.each do |key,value|
-
+      if key == :status
+        message = new_info[:messages]
+        if message.nil? && (value == :done || value == :error || value == :aborted)
+          start = info[:start]
+          eend = new_info[:end]
+          if start && eend
+            time = eend - start
+            time_str = Misc.format_seconds_short(time)
+            message = Log.color(:time, time_str)
+          end
+        end
+        report_status value, message
+      end
+
       if Exception === value
         begin
           Marshal.dump(value)
@@ -50,6 +68,7 @@ class Step
           value = new
         end
       end
+
       if info.include?(key)
         case info[key]
         when Array
@@ -72,15 +91,21 @@ class Step
 
   def report_status(status, message = nil)
     if message.nil?
-      Log.info Log.color(:status, status, true)
+      Log.info [Log.color(:status, status, true), Log.color(:task, task_name, true), Log.color(:path, path)] * " "
     else
-      Log.info Log.color(:status, status, true)
+      Log.info [Log.color(:status, status, true), Log.color(:task, task_name, true), message, Log.color(:path, path)] * " "
     end
   end
 
-  def log(status, message = nil)
+  def log(status, message = nil, &block)
+    if block_given?
+      time = Misc.exec_time &block
+      time_str = Misc.format_seconds_short time
+      message = message.nil? ? Log.color(:time, time_str) : "#{Log.color :time, time_str} - #{ message }"
+    end
+
     if message
-      merge_info :status => status, :messages =>
+      merge_info :status => status, :messages => message
     else
       merge_info :status => status
     end
@@ -91,11 +116,11 @@ class Step
   end
 
   def error?
-    status == :error
+    status == :error || status == 'error'
   end
 
   def aborted?
-    status == :aborted
+    status == :aborted || status == 'aborted'
  end
 
   def running?
@@ -105,4 +130,12 @@ class Step
   def exception
     info[:exception]
   end
+
+  def marshal_dump
+    @path
+  end
+
+  def marshal_load(path)
+    Step.new path
+  end
 end
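
An aside (not part of the diff): the new marshal_dump/marshal_load pair means a Step serializes to just its path and is rebuilt from that path on load, presumably so steps can cross process boundaries (for example through the work queue). A hedged sketch, assuming a job object from some workflow:

# Hypothetical illustration of the round trip.
step = MyWorkflow.job(:heavy_task, "run1")   # made-up job
copy = Marshal.load(Marshal.dump(step))      # equivalent to Step.new(step.path)
copy.path == step.path                       # => true; info and result are re-read from disk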

data/lib/scout/workflow/step/provenance.rb
@@ -5,7 +5,7 @@ class Step
 
   def self.status_color(status)
     case status.to_sym
-    when :error, :aborted, :
+    when :error, :aborted, :dead, :unsync
       :red
     when :streaming, :started
       :cyan
@@ -13,7 +13,7 @@ class Step
       :green
     when :dependencies, :waiting, :setup
       :yellow
-    when :notfound, :cleaned
+    when :notfound, :cleaned, :missing
       :blue
     else
       if status.to_s.index ">"
@@ -91,6 +91,8 @@ class Step
     info[:task_name] = task
     path = step.path
     status = info[:status] || :missing
+    status = status.to_sym if String === status
+    status = :noinfo if status == :missing && Open.exist?(path)
     status = "remote" if Open.remote?(path) || Open.ssh?(path)
     name = info[:name] || File.basename(path)
     status = :unsync if status == :done and not Open.exist?(path)
@@ -103,9 +105,9 @@ class Step
     step.dependencies.each do |dep|
       if dep.input_dependencies.any?
         dep.input_dependencies.each do |id|
-          input_name, _dep = dep.recursive_inputs.
+          input_name, _dep = dep.recursive_inputs.select{|f,d|
            d == id || (String === d && d.start_with?(id.files_dir)) || (Array === d && d.include?(id))
-          }.last
+          }.keys.last
           if input_name
             input_dependencies[id] ||= []
             input_dependencies[id] << [dep, input_name]
@@ -115,10 +117,10 @@ class Step
     end if step.dependencies
 
     str = ""
-    str = " " * offset + this_step_msg if ENV["
+    str = " " * offset + this_step_msg if ENV["SCOUT_ORIGINAL_STACK"] == 'true'
 
     step.dependencies.dup.tap{|l|
-      l.reverse! if ENV["
+      l.reverse! if ENV["SCOUT_ORIGINAL_STACK"] == 'true'
     }.each do |dep|
       path = dep.path
       new = ! seen.include?(path)
@@ -141,7 +143,7 @@ class Step
       end
     end if step.dependencies
 
-    str += (" " * offset) + this_step_msg unless ENV["
+    str += (" " * offset) + this_step_msg unless ENV["SCOUT_ORIGINAL_STACK"] == 'true'
 
     str
   end

data/lib/scout/workflow/step/status.rb
@@ -0,0 +1,43 @@
+class Step
+  def abort(exception = nil)
+    if info[:pid] != Process.pid && Misc.alive?(pid)
+      Process.kill pid
+    else
+      while @result && streaming? && stream = self.stream
+        stream.abort(exception)
+      end
+    end
+  end
+
+  def recoverable_error?
+    self.error? && ! (ScoutException === self.exception)
+  end
+
+  def updated?
+    return false if self.error? && self.recoverable_error?
+    return true unless ENV["SCOUT_UPDATE"]
+    newer = rec_dependencies.select{|dep| Path.newer?(self.path, dep.path) }
+    newer += input_dependencies.select{|dep| Path.newer?(self.path, dep.path) }
+
+    newer.empty?
+  end
+
+  def clean
+    @take_stream = nil
+    @result = nil
+    @info = nil
+    @info_load_time = nil
+    Open.rm path if Open.exist?(path)
+    Open.rm info_file if Open.exist?(info_file)
+    Open.rm_rf files_dir if Open.exist?(files_dir)
+  end
+
+
+  def recursive_clean
+    dependencies.each do |dep|
+      dep.recursive_clean
+    end
+    clean
+  end
+
+end
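
An aside (not part of the diff): updated? makes dependency freshness checks opt-in. Without SCOUT_UPDATE set it returns true and finished jobs are reused as-is; with it set, a job whose dependencies are newer than its own result counts as stale, and prepare_dependencies or the orchestrator will clean and recompute it. A hedged sketch with a made-up job:

# Hypothetical illustration of forcing an update check.
ENV["SCOUT_UPDATE"] = "true"
job = MyWorkflow.job(:heavy_task, "run1")   # made-up workflow/task
job.clean unless job.updated?               # drop stale results before rerunning
job.run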