neptune 0.0.7 → 0.0.8

data/README CHANGED
@@ -38,7 +38,10 @@ be found in the test folder, with the standard naming convention
 cases for each type of job that Neptune offers. Before running
 ts_neptune, you should export the environment variable APPSCALE_HEAD_NODE,
 which should be set to the IP address of the AppScale machine that runs
-the Shadow daemon (a.k.a. the Master AppController).
+the Shadow daemon (a.k.a. the Master AppController). Running
+generate_coverage.sh in the top-level directory will run rcov
+and generate the coverage reports automatically - currently this process
+takes about an hour.
 
 Developed by Chris Bunch as part of the AppScale project.
 See LICENSE for the specifics of the New BSD License
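
In practice the setup the README describes is an environment variable plus a script run; a minimal sketch, where the IP address and the test-suite path are placeholders rather than values from this package:

```ruby
# Placeholder IP: use the address of your AppScale Shadow node.
ENV['APPSCALE_HEAD_NODE'] = '192.168.1.2'
# Run the full suite (generate_coverage.sh wraps this with rcov).
load File.expand_path('test/ts_neptune.rb')
```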
@@ -63,10 +66,14 @@ in for a link to that as it becomes available.
 
 Version History:
 
-April 2, 2001 - 0.0.7 released, adding automatic test suite
+April 8, 2011 - 0.0.8 released, fixing MapReduce support
+for both regular Hadoop and Hadoop Streaming. Also increased
+code coverage to cover a number of failure scenarios.
+
+April 2, 2011 - 0.0.7 released, adding automatic test suite
 and many bug fixes for all scenarios. rcov can also be used
 to generate test coverage information: current coverage stats
-can be found in coverage directory. mapreduce broken at the
+can be found in coverage directory. MapReduce broken at the
 moment - will fix in next release
 
 March 28, 2011 - 0.0.6 released, adding support for input jobs,
@@ -5,10 +5,19 @@ $VERBOSE = nil # to surpress excessive SSL cert warnings
 $:.unshift File.join(File.dirname(__FILE__), "..", "lib")
 require 'neptune'
 
-file = ARGV[0]
-unless File.exists?(file)
-  abort("File #{file} does not exist.")
-end
+if ARGV.length == 1
+  file = ARGV[0]
+  unless File.exists?(file)
+    abort("File #{file} does not exist.")
+  end
+
+  load file
+else
+  unless ARGV.length % 2 == 0
+    abort("When creating a Neptune job on the fly, an even number of parameters must be specified.")
+  end
 
-load file
+  params = Hash[*ARGV]
+  puts neptune(params)
+end
 
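
The new else branch builds the job description directly from the command line: Ruby's Hash[] pairs up alternating elements, which is why an odd-length argument list is rejected above. A quick illustration (the parameter names here are examples only):

```ruby
# Hash[*array] zips alternating elements into key => value pairs.
argv = ["type", "input", "local", "/tmp/data.txt", "remote", "/remote/data.txt"]
params = Hash[*argv]
puts params.inspect
# => {"type"=>"input", "local"=>"/tmp/data.txt", "remote"=>"/remote/data.txt"}
```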
@@ -95,7 +95,7 @@ class AppControllerClient
   # storage parameter.
   def put_input(job_data)
     result = ""
-    make_call(NO_TIMEOUT, false) {
+    make_call(NO_TIMEOUT, false) {
       result = conn.neptune_put_input(job_data, @secret)
     }
     abort(result) if result =~ /Error:/
@@ -41,17 +41,8 @@ module CommonFunctions
   ssh_args = "-o StrictHostkeyChecking=no 2>&1"
   ssh_args << " -r " if is_dir
 
-  if public_key_loc.class == Array
-    public_key_loc.each { |key|
-      key = File.expand_path(key)
-    }
-
-    cmd = "scp -i #{public_key_loc.join(' -i ')} #{ssh_args} #{local_file_loc} root@#{target_ip}:#{remote_file_loc}"
-  else
-    public_key_loc = File.expand_path(public_key_loc)
-    cmd = "scp -i #{public_key_loc} #{ssh_args} #{local_file_loc} root@#{target_ip}:#{remote_file_loc}"
-  end
-
+  public_key_loc = File.expand_path(public_key_loc)
+  cmd = "scp -i #{public_key_loc} #{ssh_args} #{local_file_loc} root@#{target_ip}:#{remote_file_loc}"
   cmd << "; echo $? >> ~/.appscale/retval"
 
   retval_loc = File.expand_path("~/.appscale/retval")
@@ -29,11 +29,11 @@ NO_OUTPUT_NEEDED = ["input"]
 
 # A list of storage mechanisms that we can use to store and retrieve
 # data to for Neptune jobs.
-ALLOWED_STORAGE_TYPES = ["appdb", "gstorage", "s3"]
+ALLOWED_STORAGE_TYPES = ["appdb", "gstorage", "s3", "walrus"]
 
 # A list of jobs that require some kind of work to be done before
 # the actual computation can be performed.
-NEED_PREPROCESSING = ["compile", "erlang", "mapreduce", "mpi"]
+NEED_PREPROCESSING = ["compile", "erlang", "mpi"]
 
 # A set of methods and constants that we've monkey-patched to enable Neptune
 # support. In the future, it is likely that the only exposed / monkey-patched
@@ -91,34 +91,6 @@ def preprocess_erlang(job_data)
   CommonFunctions.scp_to_shadow(source_code, dest_code, keyname)
 end
 
-# This preprocessing method handles copying data for regular
-# Hadoop MapReduce and Hadoop MapReduce Streaming. For the former
-# case, we copy over just the JAR the user has given us, and
-# in the latter case, we copy over the Map and Reduce files
-# that have been specified. In either case, if the user has
-# specified to us to copy over an input file, we do that as well:
-# AppScale will copy it into HDFS for us.
-def preprocess_mapreduce(job_data)
-  return
-  #items_to_copy = ["@map", "@reduce"] if job_data["@map"] and job_data["@reduce"]
-  items_to_copy = ["@mapreducejar"] if job_data["@mapreducejar"]
-  #items_to_copy << "@input" if job_data["@copy_input"]
-  items_to_copy.each { |item|
-    source = File.expand_path(job_data[item])
-    unless File.exists?(source)
-      abort("The #{item} file #{source} does not exist.")
-    end
-
-    suffix = source.split('/')[-1]
-    dest = "/tmp/#{suffix}"
-
-    keyname = job_data["@keyname"]
-    CommonFunctions.scp_to_shadow(source, dest, keyname)
-
-    job_data[item] = dest
-  }
-end
-
 # This preprocessing method copies over the user's MPI
 # code to the master node in AppScale - this node will
 # then copy it to whoever will run the MPI job.
@@ -220,9 +192,10 @@ def neptune(params)
     abort(msg)
   end
 
-  # Our implementation for storing / retrieving via Google Storage uses
+  # Our implementation for storing / retrieving via Google Storage
+  # and Walrus uses
   # the same library as we do for S3 - so just tell it that it's S3
-  if storage == "gstorage"
+  if storage == "gstorage" or storage == "walrus"
    storage = "s3"
    job_data["@storage"] = "s3"
  end
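
Because Walrus is S3-compatible, a Walrus-backed job differs from an S3 one only in its credentials, which (per the test helper later in this diff) travel in the same :EC2_ACCESS_KEY / :EC2_SECRET_KEY / :S3_URL parameters. A hypothetical invocation, with made-up endpoint and keys:

```ruby
# Hypothetical Walrus-backed input job; Neptune rewrites :storage to
# "s3" internally. All credential values below are made up.
neptune(
  :type => "input",
  :local => "/tmp/data.txt",
  :remote => "/mybucket/data.txt",
  :storage => "walrus",
  :EC2_ACCESS_KEY => "my-access-key",
  :EC2_SECRET_KEY => "my-secret-key",
  :S3_URL => "http://walrus.example.com:8773/services/Walrus"
)
```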
@@ -6,6 +6,32 @@ class TestErlang < Test::Unit::TestCase
     }
   end
 
+  def test_nonexistent_source_code
+    location = "baz" + TestHelper.get_random_alphanumeric
+    main_file = "boo"
+    compiled = "bazagain"
+
+    msg = "Running a compile job with a non-existent source code location" +
+      " should have thrown an exception, when in fact it did not."
+
+    assert_raise(SystemExit, msg) {
+      TestHelper.compile_code(location, main_file, compiled)
+    }
+  end
+
+  def test_nonexistent_compiled_code
+    location = "baz" + TestHelper.get_random_alphanumeric
+    output = "/bazboo2"
+    storage = "appdb"
+
+    msg = "Running an Erlang compute job with a non-existent code location" +
+      " should have thrown an exception, when in fact it did not."
+
+    assert_raise(SystemExit, msg) {
+      TestHelper.start_job("erlang", location, output, storage)
+    }
+  end
+
   def run_ring_code(storage)
     expected_output = "total time for"
     ring_code = <<BAZ
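
These negative tests work because Kernel#abort, which Neptune uses on its error paths, raises SystemExit rather than killing the process outright, so test/unit's assert_raise can trap it:

```ruby
# abort raises SystemExit (with a message and exit status 1), so a
# caller can rescue it instead of letting the process die.
begin
  abort("no such file")
rescue SystemExit => e
  puts "caught SystemExit, status #{e.status}"  # => caught SystemExit, status 1
end
```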
@@ -1,13 +1,87 @@
 
 class TestMapReduce < Test::Unit::TestCase
+  def test_java_mr_code
+    STORAGE_TYPES.each { |storage|
+      # TODO: once files api is good in appscale, test this use case
+      next if storage == "appdb"
+      run_java_mr_code(storage)
+    }
+  end
+
   def test_mr_streaming_code
     STORAGE_TYPES.each { |storage|
       run_streaming_code(storage)
     }
   end
 
+  def run_java_mr_code(storage)
+    local_input = File.expand_path("~/neptune/samples/mapreduce/the-end-of-time.txt")
+    unless File.exists?(local_input)
+      abort("missing input corpus - please download it and try again.")
+    end
+    input = TestHelper.read_file(local_input)
+
+    local_code = File.expand_path("~/neptune/samples/mapreduce/hadoop-0.20.0-examples.jar")
+    unless File.exists?(local_code)
+      abort("missing hadoop examples jar - please download it and try again.")
+    end
+    main = "wordcount"
+
+    local_output = File.expand_path("~/neptune/samples/mapreduce/expected-output.txt")
+    unless File.exists?(local_output)
+      abort("missing expected output - please download it and try again.")
+    end
+    expected_output = TestHelper.read_file(local_output)
+
+    folder = "wordcount-#{TestHelper.get_random_alphanumeric}"
+    tmp_folder = "/tmp/#{folder}"
+    FileUtils.mkdir_p(tmp_folder)
+
+    input_name = "input"
+    local_input = "#{tmp_folder}/#{input_name}"
+    TestHelper.write_file(local_input, input)
+
+    remote_input = TestHelper.get_output_location("#{folder}-input", storage)
+    remote_code = TestHelper.get_output_location("#{folder}-code.jar", storage, notxt=true)
+    remote_output = TestHelper.get_output_location("#{folder}-output", storage)
+
+    put_file_in_storage(local_input, remote_input, storage)
+    put_file_in_storage(local_code, remote_code, storage)
+
+    start_mr_code(remote_input, remote_output, remote_code, main, storage)
+    get_mr_output(remote_output, expected_output, storage)
+  end
+
+
+  def start_mr_code(input, output, code, main, storage)
+    params = {
+      :type => "mapreduce",
+      :input => input,
+      :output => output,
+      :mapreducejar => code,
+      :main => main,
+      :nodes_to_use => 1
+    }.merge(TestHelper.get_storage_params(storage))
+
+    status = nil
+
+    loop {
+      status = neptune(params)
+      if status[:msg] =~ /not enough free nodes/
+        puts status[:msg]
+      else
+        break
+      end
+      sleep(5)
+    }
+
+    msg = "Your job was not started successfully. The failure message " +
+      "reported was #{status[:msg]}"
+    assert_equal(status[:result], :success, msg)
+  end
+
   def run_streaming_code(storage)
-    expected_output = "0: We have 1 processors"
+    expected_output = "sum x ="
     input = <<BAZ
 1 32
 33 64
@@ -16,7 +90,7 @@ class TestMapReduce < Test::Unit::TestCase
 BAZ
 
     map_code = <<BAZ
-#!/usr/local/bin/ruby -w
+#!/usr/bin/ruby -w
 # Programmer: Chris Bunch
 # mapper-ruby.rb: Solves part of the EP parallel benchmark via the
 # MapReduce framework as follows:
@@ -47,7 +121,7 @@ def ep(k)
 
     max = [xk.abs, yk.abs].max
     l = max.floor
-    puts l + " " + xk + " " + yk
+    puts l.to_s + " " + xk.to_s + " " + yk.to_s
   end
 end
 
@@ -68,7 +142,7 @@ loop {
 BAZ
 
     red_code = <<BAZ
-#!/usr/local/bin/ruby -w
+#!/usr/bin/ruby -w
 # Programmer: Chris Bunch
 # reducer-ruby.rb: Solves part of the EP parallel benchmark via the
 # MapReduce framework as follows:
@@ -94,7 +168,7 @@ loop {
   current_l = l if current_l.nil?
 
   if l != current_l
-    puts "bucket = " + current_l + ", |x| = " + x_count + ", |y| = " + y_count
+    puts "bucket = " + current_l.to_s + ", |x| = " + x_count.to_s + ", |y| = " + y_count.to_s
     current_l = l
     x_count = 0
     y_count = 0
@@ -113,8 +187,8 @@ loop {
   end
 }
 
-puts "bucket = " + current_l + ", |x| = " + x_count + ", |y| = " + y_count
-puts "sum x = " + sum_x + ", sum y = " + sum_y
+puts "bucket = " + current_l.to_s + ", |x| = " + x_count.to_s + ", |y| = " + y_count.to_s
+puts "sum x = " + sum_x.to_s + ", sum y = " + sum_y.to_s
 
 BAZ
 
@@ -127,8 +201,6 @@ BAZ
 
     tmp_folder = "/tmp/#{folder}"
     FileUtils.mkdir_p(tmp_folder)
-    compiled = "#{tmp_folder}-compiled"
-    compiled_code = "#{compiled}/HelloWorld"
 
     local_input = "#{tmp_folder}/#{input_name}"
     local_map = "#{tmp_folder}/#{map_source}"
@@ -199,9 +271,12 @@ BAZ
   def get_mr_output(output, expected, storage)
     result = TestHelper.get_job_output(output, storage)
 
+    TestHelper.write_file("/tmp/result", result)
+
     msg = "The MapReduce job you ran did not return the expected result. " +
       "We expected to see [#{expected}] but instead saw [#{result}]"
-    assert_equal(result, expected, msg)
+    success = result.include?(expected)
+    assert(success, msg)
   end
 end
 
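
get_mr_output now checks for a substring rather than exact equality, so extra lines or formatting drift in the job output no longer break the assertion; with illustrative output:

```ruby
# include? keeps the check robust to surrounding output lines.
result = "bucket = 9, |x| = 3, |y| = 4\nsum x = -4.2, sum y = -15.8\n"
puts result.include?("sum x =")  # => true
```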
@@ -1,15 +1,51 @@
 
 class TestMPI < Test::Unit::TestCase
   def test_hello_world_code
-    num_procs = [1]
+    num_procs = 1
+
+    STORAGE_TYPES.each { |storage|
+      run_hello_world_code(storage, num_procs)
+    }
+  end
 
+  def test_not_enough_procs
+    num_procs = 0
+
     STORAGE_TYPES.each { |storage|
-      num_procs.each { |p|
-        run_hello_world_code(storage, p)
+      msg = "Running an MPI compute job with p < n should have thrown " +
+        "an exception, when in fact it did not. Here we used #{storage} " +
+        "as the storage backend."
+
+      assert_raise(SystemExit, msg) {
+        run_hello_world_code(storage, num_procs)
       }
     }
   end
 
+  def test_bad_source_code
+    location = "/tmp/baz" + TestHelper.get_random_alphanumeric
+    output = "/bazboo2"
+    storage = "appdb"
+
+    msg = "Running an MPI compute job with a non-existent code location" +
+      " should have thrown an exception, when in fact it did not."
+
+    assert_raise(SystemExit, msg) {
+      TestHelper.start_job("mpi", location, output, storage)
+    }
+
+    FileUtils.mkdir_p(location)
+
+    bad_file_msg = "Running an MPI compute job with a code location that" +
+      " is not a file should have thrown an exception, when in fact it did not."
+
+    assert_raise(SystemExit, bad_file_msg) {
+      TestHelper.start_job("mpi", location, output, storage)
+    }
+
+    FileUtils.rmdir(location)
+  end
+
   def run_hello_world_code(storage, num_procs)
     expected_output = "0: We have 1 processors"
     ring_code = <<BAZ
@@ -1,13 +1,177 @@
 
 class TestStorage < Test::Unit::TestCase
+  def test_acl
+    STORAGE_TYPES.each { |storage|
+      run_in_acl(storage)
+    }
+  end
+
   def test_in_out
     STORAGE_TYPES.each { |storage|
       run_in_out(storage)
     }
   end
 
+  def test_run_in_out_w_env
+    STORAGE_TYPES.each { |storage|
+      run_in_out_w_env(storage)
+    }
+  end
+
+  def test_no_creds
+    creds = %w{
+      GSTORAGE_ACCESS_KEY GSTORAGE_SECRET_KEY GSTORAGE_URL
+      S3_ACCESS_KEY S3_SECRET_KEY S3_URL
+      WALRUS_ACCESS_KEY WALRUS_SECRET_KEY WALRUS_URL
+    }
+
+    old_creds = {}
+    creds.each { |c|
+      old_creds[c] = ENV[c]
+      ENV[c] = nil
+    }
+
+    # try an input job with creds in env but not in job
+    # should succeed
+
+    STORAGE_TYPES.each { |storage|
+      params = { :storage => storage }
+      testhelper = flexmock(TestHelper)
+      testhelper.should_receive(:get_storage_params).with(storage).and_return(params)
+
+      no_msg = "Trying to start a storage job and failing to specify " +
+        "necessary credentials should not have failed, but it did." +
+        " The storage type used was #{storage}."
+
+      msg = "Trying to start a storage job and failing to specify " +
+        "necessary credentials should have failed, but it didn't." +
+        " The storage type used was #{storage}."
+
+      if storage == "appdb"
+        assert_nothing_raised(SystemExit, no_msg) {
+          run_in_out(storage)
+        }
+      else
+        assert_raise(SystemExit, msg) {
+          run_in_out(storage)
+        }
+      end
+    }
+
+    creds.each { |c|
+      ENV[c] = old_creds[c]
+    }
+  end
+
+  def test_bad_storage
+    msg = "Specifying an incorrect storage backend should have thrown an " +
+      "exception, when in fact it did not."
+    assert_raise(SystemExit, msg) { run_in_out("blarg_storage") }
+  end
+
+  def test_bad_output_location
+    output = "baz-boo-for-me-too"
+
+    STORAGE_TYPES.each { |storage|
+      end_of_msg = " should have thrown an exception, when in fact it did not." +
+        "Here we tested with #{storage} as the storage backend."
+
+      no_slash_msg = "Specifying an output location without a leading slash"
+
+      assert_raise(SystemExit, no_slash_msg + end_of_msg) {
+        TestHelper.get_job_output(output, storage)
+      }
+
+      no_output_msg = "Specifying an output job with a blank output parameter"
+      assert_raise(SystemExit, no_output_msg + end_of_msg) {
+        TestHelper.get_job_output("", storage)
+      }
+
+      nil_output_msg = "Specifying an output job with a nil output"
+      assert_raise(SystemExit, nil_output_msg + end_of_msg) {
+        TestHelper.get_job_output(nil, storage)
+      }
+    }
+  end
+
+  def run_in_acl(storage)
+    contents = TestHelper.get_random_alphanumeric(1024) + "+&baz"
+    suffix = "neptune-testfile-#{TestHelper.get_random_alphanumeric}"
+    local = "/tmp/#{suffix}"
+    TestHelper.write_file(local, contents)
+    remote = TestHelper.get_output_location(suffix, storage)
+
+    in_params = {
+      :type => "input",
+      :local => local,
+      :remote => remote
+    }.merge(TestHelper.get_storage_params(storage))
+
+    input_result = neptune(in_params)
+
+    msg = "We were unable to store a file in the datastore. We " +
+      " got back this: #{msg}"
+    assert(input_result, msg)
+
+    get_params = {
+      :type => "get-acl",
+      :output => remote
+    }.merge(TestHelper.get_storage_params(storage))
+
+    acl = neptune(get_params)
+
+    get_acl_msg1 = "The default ACL should be private, but was [#{acl}] instead."
+    assert_equal("private", acl, get_acl_msg1)
+
+    # TODO: set acl is currently broken - once we fix it, we should
+    # do the following:
+
+    # set acl to something else
+    # verify that it was set correctly
+
+    FileUtils.rm_rf(local)
+  end
+
+  def run_in_out_w_env(storage)
+    creds = %w{
+      GSTORAGE_ACCESS_KEY GSTORAGE_SECRET_KEY GSTORAGE_URL
+      S3_ACCESS_KEY S3_SECRET_KEY S3_URL
+      WALRUS_ACCESS_KEY WALRUS_SECRET_KEY WALRUS_URL
+    }
+
+    old_creds = {}
+    creds.each { |c|
+      old_creds[c] = ENV[c]
+    }
+
+    s3_creds = %w{ EC2_ACCESS_KEY EC2_SECRET_KEY S3_URL }
+
+    needed_creds = TestHelper.get_storage_params(storage)
+    puts needed_creds.inspect
+
+    params = { :storage => storage }
+    testhelper = flexmock(TestHelper)
+    testhelper.should_receive(:get_storage_params).with(storage).and_return(params)
+
+    s3_creds.each { |c|
+      ENV[c] = needed_creds[c.to_sym]
+    }
+
+    run_in_out(storage)
+
+    s3_creds.each { |c|
+      ENV[c] = nil
+    }
+
+    creds.each { |c|
+      ENV[c] = old_creds[c]
+    }
+
+    testhelper.flexmock_teardown
+  end
+
   def run_in_out(storage)
-    contents = TestHelper.get_random_alphanumeric(1024)
+    contents = TestHelper.get_random_alphanumeric(1024) + "+&baz"
     suffix = "neptune-testfile-#{TestHelper.get_random_alphanumeric}"
     local = "/tmp/#{suffix}"
     TestHelper.write_file(local, contents)
@@ -98,11 +98,9 @@ module TestHelper
       :EC2_SECRET_KEY => ENV['WALRUS_SECRET_KEY'],
       :S3_URL => ENV['WALRUS_URL']
     }
-  elsif storage == "appdb"
-    return {}
-    # nothing special to do
   else
-    abort "Storage specified was not an acceptable value: #{storage}"
+    return { :storage => storage }
+    # nothing special to do
   end
 end
 
@@ -1,5 +1,4 @@
-#STORAGE_TYPES = ["gstorage"]
-STORAGE_TYPES = ["appdb", "gstorage", "s3", "walrus"]
+STORAGE_TYPES = ["appdb", "gstorage", "s3", "walrus"] - ["appdb"]
 
 $:.unshift File.join(File.dirname(__FILE__), "..", "lib")
 require 'neptune'
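
The rewritten STORAGE_TYPES constant uses array subtraction to temporarily bench "appdb" while keeping the full backend list visible in the source:

```ruby
# Array difference removes matching elements, so "appdb" stays listed
# but is skipped by every STORAGE_TYPES.each loop in the suite.
all_backends = ["appdb", "gstorage", "s3", "walrus"]
puts (all_backends - ["appdb"]).inspect  # => ["gstorage", "s3", "walrus"]
```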
@@ -13,6 +12,7 @@ S3_ACCESS_KEY S3_SECRET_KEY S3_URL
 WALRUS_ACCESS_KEY WALRUS_SECRET_KEY WALRUS_URL }
 
 require 'test/unit'
+require 'flexmock/test_unit'
 
 REQUIRED_CREDS.each { |cred|
   msg = "The environment variable #{cred} was not set. Please " +
@@ -25,17 +25,13 @@ msg = "AppScale is not currently running at " +
   "#{APPSCALE_HEAD_NODE_IP}. Please start AppScale and try again."
 abort(msg) unless TestHelper.is_appscale_running?(APPSCALE_HEAD_NODE_IP)
 
-TEST_ALL_WORKING = true
-
-if TEST_ALL_WORKING
-  require 'tc_c'
-  require 'tc_dfsp'
-  require 'tc_dwssa'
-  require 'tc_erlang'
-  require 'tc_mpi'
-  require 'tc_storage'
-  require 'tc_upc'
-  require 'tc_x10'
-else
-  require 'tc_mapreduce'
-end
+require 'tc_c'
+require 'tc_dfsp'
+require 'tc_dwssa'
+require 'tc_erlang'
+require 'tc_mapreduce'
+require 'tc_mpi'
+require 'tc_storage'
+require 'tc_upc'
+require 'tc_x10'
+
metadata CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: neptune
 version: !ruby/object:Gem::Version
-  hash: 17
+  hash: 15
   prerelease:
   segments:
   - 0
   - 0
-  - 7
-  version: 0.0.7
+  - 8
+  version: 0.0.8
 platform: ruby
 authors:
 - Chris Bunch
@@ -15,7 +15,7 @@ autorequire: neptune
 bindir: bin
 cert_chain: []
 
-date: 2011-04-02 00:00:00 -07:00
+date: 2011-04-08 00:00:00 -07:00
 default_executable: neptune
 dependencies: []
 