neptune 0.2.1 → 0.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README +4 -0
- data/doc/BabelHelper.html +393 -376
- data/doc/BadConfigurationException.html +121 -127
- data/doc/CommonFunctions.html +237 -265
- data/doc/ExodusHelper.html +820 -0
- data/doc/ExodusTaskInfo.html +263 -0
- data/doc/FileNotFoundException.html +121 -127
- data/doc/NeptuneHelper.html +527 -592
- data/doc/NeptuneManagerClient.html +696 -0
- data/doc/NeptuneManagerException.html +139 -0
- data/doc/Object.html +334 -236
- data/doc/TaskInfo.html +428 -0
- data/doc/created.rid +8 -5
- data/doc/images/add.png +0 -0
- data/doc/images/delete.png +0 -0
- data/doc/images/tag_blue.png +0 -0
- data/doc/images/transparent.png +0 -0
- data/doc/index.html +74 -142
- data/doc/js/darkfish.js +99 -62
- data/doc/js/jquery.js +15 -29
- data/doc/js/navigation.js +142 -0
- data/doc/js/search.js +94 -0
- data/doc/js/search_index.js +1 -0
- data/doc/js/searcher.js +228 -0
- data/doc/table_of_contents.html +226 -0
- data/lib/babel.rb +116 -50
- data/lib/custom_exceptions.rb +2 -2
- data/lib/exodus.rb +311 -0
- data/lib/exodus_task_info.rb +36 -0
- data/lib/neptune.rb +52 -18
- data/lib/{app_controller_client.rb → neptune_manager_client.rb} +54 -38
- data/lib/task_info.rb +155 -0
- data/test/{unit/test_babel.rb → test_babel.rb} +161 -26
- data/test/{unit/test_common_functions.rb → test_common_functions.rb} +1 -1
- data/test/test_exodus.rb +687 -0
- data/test/{unit/test_neptune.rb → test_neptune.rb} +28 -17
- data/test/{unit/test_app_controller_client.rb → test_neptune_manager_client.rb} +15 -16
- data/test/test_task_info.rb +32 -0
- data/test/{unit/ts_all.rb → ts_all.rb} +3 -1
- metadata +30 -34
- data/doc/AppControllerClient.html +0 -702
- data/doc/AppControllerException.html +0 -145
- data/doc/bin/neptune.html +0 -56
- data/doc/js/quicksearch.js +0 -114
- data/doc/js/thickbox-compressed.js +0 -10
- data/doc/lib/app_controller_client_rb.html +0 -60
- data/doc/lib/babel_rb.html +0 -68
- data/doc/lib/common_functions_rb.html +0 -70
- data/doc/lib/custom_exceptions_rb.html +0 -54
- data/doc/lib/neptune_rb.html +0 -60
- data/test/integration/tc_c.rb +0 -57
- data/test/integration/tc_dfsp.rb +0 -37
- data/test/integration/tc_dwssa.rb +0 -38
- data/test/integration/tc_erlang.rb +0 -183
- data/test/integration/tc_mapreduce.rb +0 -282
- data/test/integration/tc_mpi.rb +0 -160
- data/test/integration/tc_storage.rb +0 -209
- data/test/integration/tc_upc.rb +0 -75
- data/test/integration/tc_x10.rb +0 -94
- data/test/integration/test_helper.rb +0 -135
- data/test/integration/ts_neptune.rb +0 -40
data/lib/custom_exceptions.rb
CHANGED
@@ -1,8 +1,8 @@
 # Programmer: Chris Bunch
 
-# A special class of exceptions that are thrown whenever the
+# A special class of exceptions that are thrown whenever the NeptuneManager
 # experiences an unexpected result.
-class
+class NeptuneManagerException < Exception
 end
 
 
data/lib/exodus.rb
ADDED
@@ -0,0 +1,311 @@
+#!/usr/bin/env ruby
+# Programmer: Chris Bunch
+
+
+require 'babel'
+require 'custom_exceptions'
+require 'exodus_task_info'
+
+
+# Exodus provides further improvements to Babel. Instead of making users tell
+# us what compute, storage, and queue services they want to use (required for
+# babel calls), Exodus will automatically handle this for us. Callers need
+# to specify what clouds their job can run over, and Exodus will automatically
+# select the best cloud for their job and run it there.
+def exodus(jobs)
+  if jobs.class == Hash
+    job_given_as_hash = true
+    jobs = [jobs]
+  elsif jobs.class == Array
+    job_given_as_hash = false
+    ExodusHelper.ensure_all_jobs_are_hashes(jobs)
+  else
+    raise BadConfigurationException.new("jobs was a #{jobs.class}, which " +
+      "is not an acceptable class type")
+  end
+
+  tasks = []
+
+  jobs.each { |job|
+    ExodusHelper.ensure_all_params_are_present(job)
+    profiling_info = ExodusHelper.get_profiling_info(job)
+    clouds_to_run_task_on = ExodusHelper.get_clouds_to_run_task_on(job,
+      profiling_info)
+    babel_tasks_to_run = ExodusHelper.generate_babel_tasks(job,
+      clouds_to_run_task_on)
+    dispatched_tasks = ExodusHelper.run_job(babel_tasks_to_run)
+    tasks << ExodusTaskInfo.new(dispatched_tasks)
+  }
+
+  if job_given_as_hash
+    return tasks[0]
+  else
+    return tasks
+  end
+end
+
+
+# This module provides convenience functions for exodus(), to avoid cluttering
+# up Object or Kernel's namespace.
+module ExodusHelper
+
+
+  # A list of clouds that users can run tasks on via Exodus.
+  SUPPORTED_CLOUDS = [:AmazonEC2, :Eucalyptus, :GoogleAppEngine,
+    :MicrosoftAzure]
+
+
+  CLOUD_CREDENTIALS = {
+    :AmazonEC2 => [:EC2_ACCESS_KEY, :EC2_SECRET_KEY, :EC2_URL, :S3_URL,
+      :S3_bucket_name],
+    :Eucalyptus => [:EUCA_ACCESS_KEY, :EUCA_SECRET_KEY, :EUCA_URL,
+      :WALRUS_URL, :Walrus_bucket_name],
+    :GoogleAppEngine => [:appid, :appcfg_cookies, :function,
+      :GStorage_bucket_name],
+    :MicrosoftAzure => [:WAZ_Account_Name, :WAZ_Access_Key,
+      :WAZ_Container_Name]
+  }
+
+
+  CLOUD_BABEL_PARAMS = {
+    :AmazonEC2 => {
+      :storage => "s3",
+      :engine => "executor-sqs"
+    },
+    :Eucalyptus => {
+      :storage => "walrus",
+      :engine => "executor-rabbitmq"
+    },
+    :GoogleAppEngine => {
+      :storage => "gstorage",
+      :engine => "appengine-push-q"
+    },
+    :MicrosoftAzure => {
+      :storage => "waz-storage",
+      :engine => "waz-push-q"
+    }
+  }
+
+
+  OPTIMIZE_FOR_CHOICES = [:performance, :cost, :auto]
+
+
+  # Given an Array of jobs to run, ensures that they are all Hashes, the
+  # standard format for Neptune jobs.
+  def self.ensure_all_jobs_are_hashes(jobs)
+    jobs.each { |job|
+      if job.class != Hash
+        raise BadConfigurationException.new("A job passed to exodus() was " +
+          "not a Hash, but was a #{job.class}")
+      end
+    }
+  end
+
+
+  # Given an Exodus job, validates its parameters, raising a
+  # BadConfigurationException for any missing params.
+  def self.ensure_all_params_are_present(job)
+    if job[:clouds_to_use].nil?
+      raise BadConfigurationException.new(":clouds_to_use was not specified")
+    else
+      self.convert_clouds_to_use_to_array(job)
+      self.validate_clouds_to_use(job)
+      self.validate_optimize_for_param(job)
+      self.validate_files_argv_executable(job)
+    end
+  end
+
+
+  # Given a single Exodus job, checks to make sure it has either an Array
+  # of Strings or a single String listing the clouds that a given task can
+  # run on. Raises a BadConfigurationException if :clouds_to_use is not in
+  # the right format.
+  def self.convert_clouds_to_use_to_array(job)
+    clouds_class = job[:clouds_to_use].class
+    if clouds_class == Symbol
+      job[:clouds_to_use] = [job[:clouds_to_use]]
+    elsif clouds_class == Array
+      job[:clouds_to_use].each { |cloud|
+        if cloud.class != Symbol
+          raise BadConfigurationException.new("#{cloud} was not a String, " +
+            "but was a #{cloud.class}")
+        end
+      }
+    else
+      raise BadConfigurationException.new("#{job[:clouds_to_use]} was not " +
+        "a String or Array, but was a #{clouds_class}")
+    end
+  end
+
+
+  # Given a single Exodus job, checks to make sure that we can actually run
+  # it in this version of Neptune, and that the user has given us all the
+  # credentials needed to use that cloud.
+  def self.validate_clouds_to_use(job)
+    self.ensure_credentials_are_in_correct_format(job)
+    self.propogate_credentials_from_environment(job)
+
+    job[:clouds_to_use].each { |cloud|
+      if SUPPORTED_CLOUDS.include?(cloud)
+        CLOUD_CREDENTIALS[cloud].each { |required_credential|
+          val_for_credential = job[:credentials][required_credential]
+          if val_for_credential.nil? or val_for_credential.empty?
+            raise BadConfigurationException.new("To use #{cloud}, " +
+              "#{required_credential} must be specified.")
+          end
+        }
+      else
+        raise BadConfigurationException.new("#{cloud} was specified as in " +
+          ":clouds_to_use, which is not a supported cloud.")
+      end
+    }
+  end
+
+
+  def self.ensure_credentials_are_in_correct_format(job)
+    if job[:credentials].nil?
+      raise BadConfigurationException.new("No credentials were specified.")
+    end
+
+    if job[:credentials].class != Hash
+      raise BadConfigurationException.new("Credentials given were not a " +
+        "Hash, but were a #{job[:credentials].class}")
+    end
+  end
+
+
+  # Searches the caller's environment variables, and adds any that could
+  # be used in this Exodus job. Only takes in credentials from the
+  # environment if the job does not specify it.
+  def self.propogate_credentials_from_environment(job)
+    CLOUD_CREDENTIALS.each { |cloud_name, credential_list|
+      credential_list.each { |cred|
+        if job[:credentials][cred].nil? and !ENV[cred.to_s].nil?
+          job[:credentials][cred] = ENV[cred.to_s]
+        end
+      }
+    }
+  end
+
+
+  def self.validate_optimize_for_param(job)
+    if job[:optimize_for].nil?
+      raise BadConfigurationException.new(":optimize_for needs to be " +
+        "specified when running Exodus jobs")
+    end
+
+    if !OPTIMIZE_FOR_CHOICES.include?(job[:optimize_for])
+      raise BadConfigurationException.new("The value given for " +
+        ":optimize_for was not an acceptable value. Acceptable values are: " +
+        "#{OPTIMIZE_FOR_CHOICES.join(', ')}")
+    end
+  end
+
+
+  def self.validate_files_argv_executable(job)
+    [:code, :argv, :executable].each { |param|
+      if job[param].nil?
+        raise BadConfigurationException.new("#{param} was not specified")
+      end
+    }
+  end
+
+
+  def self.get_profiling_info(job)
+    key = self.get_key_from_job_data(job)
+    neptune_manager = BabelHelper.get_neptune_manager_client(job)
+    return neptune_manager.get_profiling_info(key)
+  end
+
+
+  # TODO(cgb): what is a job's key?
+  def self.get_key_from_job_data(job)
+    return job[:code]
+  end
+
+
+  def self.get_clouds_to_run_task_on(job, profiling_info)
+    optimize_for = job[:optimize_for]
+    if optimize_for == :performance or optimize_for == :cost
+      return self.get_minimum_val_in_data(job, profiling_info)
+    else
+      return self.find_optimal_cloud_for_task(job, profiling_info)
+    end
+  end
+
+
+  def self.get_minimum_val_in_data(job, profiling_info)
+    min_cloud = nil
+    min_val = 1_000_000 # infinity
+    optimize_for = job[:optimize_for].to_s
+
+    clouds_to_run_on = []
+    job[:clouds_to_use].each { |cloud|
+      # If we have no information on this cloud, then add it to the list
+      # of clouds we should run the task on, since it could potentially be
+      # lower than the minimum in the data we've seen so far.
+      if profiling_info[cloud.to_s].nil?
+        clouds_to_run_on << cloud
+        next
+      end
+
+      val = self.average(profiling_info[cloud.to_s][optimize_for])
+      if val < min_val
+        min_cloud = cloud
+        min_val = val
+      end
+    }
+
+    if !min_cloud.nil?
+      clouds_to_run_on << min_cloud
+    end
+
+    return clouds_to_run_on
+  end
+
+
+  # Given an Array of values, calculates and returns their average.
+  def self.average(vals)
+    sum = vals.reduce(0.0) { |running_total, val|
+      running_total + val
+    }
+
+    return sum / vals.length
+  end
+
+
+  def self.find_optimal_cloud_for_task(job, profiling_info)
+    raise NotImplementedError
+  end
+
+
+  def self.generate_babel_tasks(job, clouds_to_run_task_on)
+    tasks = []
+
+    clouds_to_run_task_on.each { |cloud|
+      task = { :type => "babel",
+        :code => job[:code],
+        :argv => job[:argv],
+        :executable => job[:executable],
+        :is_remote => false,
+        :run_local => false
+      }
+
+      CLOUD_CREDENTIALS[cloud].each { |credential|
+        task[credential] = job[:credentials][credential]
+      }
+
+      task.merge!(CLOUD_BABEL_PARAMS[cloud])
+      tasks << task
+    }
+
+    return tasks
+  end
+
+
+  def self.run_job(tasks_to_run)
+    return babel(tasks_to_run)
+  end
+
+
+end
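For orientation, here is a hypothetical usage sketch, not part of the gem or of this diff, showing how a caller might invoke exodus() with the parameters that ExodusHelper validates above. The script path, bucket name, and executable are placeholders, and the EC2 credentials are assumed to be exported in the caller's environment.

require 'exodus'

# Hypothetical single job; passing a Hash returns a single ExodusTaskInfo,
# while an Array of Hashes would return an Array of them.
job = {
  :clouds_to_use => :AmazonEC2,
  :credentials => {
    :EC2_ACCESS_KEY => ENV['EC2_ACCESS_KEY'],
    :EC2_SECRET_KEY => ENV['EC2_SECRET_KEY'],
    :EC2_URL => ENV['EC2_URL'],
    :S3_URL => ENV['S3_URL'],
    :S3_bucket_name => 'my-exodus-bucket'  # placeholder
  },
  :optimize_for => :cost,
  :code => '/path/to/my_script.rb',        # placeholder
  :argv => [],
  :executable => 'ruby'
}

task = exodus(job)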
data/lib/exodus_task_info.rb
ADDED
@@ -0,0 +1,36 @@
+# Programmer: Chris Bunch
+
+
+require 'task_info'
+
+
+class ExodusTaskInfo
+
+
+  def initialize(dispatched_babel_tasks)
+    @babel_tasks = dispatched_babel_tasks
+  end
+
+
+  def to_s
+    method_missing(:to_s)
+  end
+
+
+  def method_missing(id, *args, &block)
+    loop {
+      @babel_tasks.each_with_index { |task, i|
+        begin
+          Timeout::timeout(2) {
+            result = task.send(id, *args, &block)
+            return result
+          }
+        rescue Timeout::Error
+          next
+        end
+      }
+    }
+  end
+
+
+end
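As a brief, hypothetical follow-up to the sketch above (not from the diff): any method called on the object returned by exodus() is routed through method_missing, which polls each dispatched Babel task with a two-second timeout and returns the first answer it receives.

task = exodus(job)
puts task.to_s  # forwarded to whichever dispatched Babel task answers first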
data/lib/neptune.rb
CHANGED
@@ -1,9 +1,9 @@
 #!/usr/bin/ruby
 # Programmer: Chris Bunch (cgb@cs.ucsb.edu)
 
-require 'app_controller_client'
 require 'common_functions'
 require 'custom_exceptions'
+require 'neptune_manager_client'
 
 
 # Setting verbose to nil here suppresses the otherwise
@@ -13,7 +13,7 @@ $VERBOSE = nil
 
 
 # A list of all the Neptune job types that we support
-ALLOWED_JOB_TYPES = %w{acl cicero compile erlang mpi input output ssa babel upc x10}
+ALLOWED_JOB_TYPES = %w{acl cicero compile erlang mpi input output ssa babel upc x10 mapreduce}
 
 
 # The string to display for disallowed job types.
@@ -32,7 +32,7 @@ NO_OUTPUT_NEEDED = ["input"]
 
 # A list of storage mechanisms that we can use to store and retrieve
 # data to for Neptune jobs.
-ALLOWED_STORAGE_TYPES =
+ALLOWED_STORAGE_TYPES = %w{appdb gstorage s3 walrus waz-storage}
 
 
 # A list of jobs that require some kind of work to be done before
@@ -55,23 +55,50 @@ public
 # the user can request to run a job, retrieve a job's output, or modify the
 # access policy (ACL) for the output of a job. By default, job data is private,
 # but a Neptune job can be used to set it to public later (and vice-versa).
-def neptune(
+def neptune(jobs)
   # Kernel.puts "Received a request to run a job."
   # Kernel.puts params[:type]
+  if jobs.class == Hash
+    jobs = [jobs]
+  end
 
-
-
-
-
+  job_data_list = []
+  shadow_ip = nil
+  ssh_args = ""
+  secret = ""
+  controller = nil
 
-
-
-
-
+  jobs.each { |params|
+    job_data = NeptuneHelper.get_job_data(params)
+    NeptuneHelper.validate_storage_params(job_data)
+    # Kernel.puts "job data = #{job_data.inspect}"
+    keyname = job_data["@keyname"]
 
-
-
-
+    shadow_ip = CommonFunctions.get_from_yaml(keyname, :shadow)
+    secret = CommonFunctions.get_secret_key(keyname)
+    ssh_key = File.expand_path("~/.appscale/#{keyname}.key")
+    ssh_args = "-i ~/.appscale/#{keyname}.key -o StrictHostkeyChecking=no "
+
+    controller = NeptuneManagerClient.new(shadow_ip, secret)
+    NeptuneHelper.do_preprocessing(job_data, controller)
+    job_data_list << job_data
+  }
+
+  if job_data_list.length == 1
+    return NeptuneHelper.run_job(job_data_list[0], ssh_args, shadow_ip,
+      secret)
+  else # right now we only support batch run_job operations
+    msg = controller.start_neptune_job(job_data_list)
+    result = {}
+    result[:msg] = msg
+    if result[:msg] =~ /job is now running\Z/
+      result[:result] = :success
+    else
+      result[:result] = :failure
+    end
+
+    return result
+  end
 end
 
 
@@ -145,6 +172,9 @@ module NeptuneHelper
   def self.preprocess_mpi(job_data, controller)
     self.require_param("@nodes_to_use", job_data)
     self.require_param("@procs_to_use", job_data)
+    self.require_param("@output", job_data)
+    self.require_param("@error", job_data)
+    self.require_param("@metadata", job_data)
 
     if job_data["@procs_to_use"]
       p = job_data["@procs_to_use"]
@@ -200,7 +230,7 @@ module NeptuneHelper
   end
 
 
-  # This helper method asks the
+  # This helper method asks the NeptuneManager if the named file exists,
   # and if it does not, throws an exception.
   def self.require_file_to_exist(file, job_data, controller)
     if controller.does_file_exist?(file, job_data)
@@ -232,6 +262,8 @@ module NeptuneHelper
     self.require_param("@code", job_data)
     self.require_param("@engine", job_data)
     self.require_param("@output", job_data)
+    self.require_param("@error", job_data)
+    self.require_param("@metadata", job_data)
 
     # For most code types, the file's name given is the thing to exec.
     # For Java, the actual file to search for is whatever the user gives
@@ -243,6 +275,8 @@ module NeptuneHelper
 
     self.require_file_to_exist(code_file_name, job_data, controller)
     self.require_file_to_not_exist(job_data["@output"], job_data, controller)
+    self.require_file_to_not_exist(job_data["@error"], job_data, controller)
+    self.require_file_to_not_exist(job_data["@metadata"], job_data, controller)
 
     if job_data["@argv"]
       argv = job_data["@argv"]
@@ -275,7 +309,7 @@ module NeptuneHelper
 
   # This method takes in a hash in the format that users write neptune/babel
   # jobs in {:a => "b"} and converts it to the legacy format that Neptune
-  # used to use {"@a" => "b"}, and is understood by the
+  # used to use {"@a" => "b"}, and is understood by the NeptuneManager.
   def self.get_job_data(params)
     job_data = {}
     params.each { |k, v|
@@ -500,7 +534,7 @@ module NeptuneHelper
   # This method actually runs the Neptune job, given information about the job
   # as well as information about the node to send the request to.
   def self.run_job(job_data, ssh_args, shadow_ip, secret)
-    controller =
+    controller = NeptuneManagerClient.new(shadow_ip, secret)
 
     # TODO - right now the job is assumed to succeed in many cases
     # need to investigate the various failure scenarios
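Finally, a hypothetical example of the reworked neptune() entry point above, not taken from the gem: a single Hash still runs one job as before, while an Array of Hashes is preprocessed locally and then dispatched in a single start_neptune_job() call, returning a Hash with :result and :msg. The MPI parameters mirror the ones preprocess_mpi() now requires; the paths are placeholders, and deployment-specific settings (keyname, storage, and so on) are omitted.

require 'neptune'

# Batch form: both jobs are preprocessed locally, then sent to the
# NeptuneManager together; the return value is a Hash with :result
# (:success or :failure) and :msg.
result = neptune([
  { :type => 'mpi',
    :code => '/code/powermethod',   # placeholder
    :nodes_to_use => 4,
    :procs_to_use => 4,
    :output => '/mpi/output-1',
    :error => '/mpi/error-1',
    :metadata => '/mpi/metadata-1' },
  { :type => 'mpi',
    :code => '/code/powermethod',   # placeholder
    :nodes_to_use => 8,
    :procs_to_use => 8,
    :output => '/mpi/output-2',
    :error => '/mpi/error-2',
    :metadata => '/mpi/metadata-2' }
])
puts result[:result]  # :success if the batch reports "job is now running"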