neptune 0.0.6 → 0.0.7
- data/README +19 -3
- data/doc/AppControllerClient.html +53 -7
- data/doc/CommonFunctions.html +16 -12
- data/doc/Object.html +200 -39
- data/doc/README.html +55 -17
- data/doc/bin/neptune.html +2 -6
- data/doc/created.rid +6 -6
- data/doc/index.html +6 -2
- data/doc/lib/app_controller_client_rb.html +1 -1
- data/doc/lib/common_functions_rb.html +1 -1
- data/doc/lib/neptune_rb.html +56 -0
- data/lib/common_functions.rb +0 -1
- data/lib/neptune.rb +17 -18
- data/test/tc_c.rb +57 -0
- data/test/tc_dfsp.rb +37 -0
- data/test/tc_dwssa.rb +38 -0
- data/test/tc_erlang.rb +157 -0
- data/test/tc_mapreduce.rb +207 -0
- data/test/tc_mpi.rb +124 -0
- data/test/tc_storage.rb +45 -0
- data/test/tc_upc.rb +75 -0
- data/test/tc_x10.rb +94 -0
- data/test/test_helper.rb +137 -0
- data/test/ts_neptune.rb +41 -0
- metadata +16 -4
data/test/tc_erlang.rb
ADDED
@@ -0,0 +1,157 @@
+
+class TestErlang < Test::Unit::TestCase
+  def test_ring_code
+    STORAGE_TYPES.each { |storage|
+      run_ring_code(storage)
+    }
+  end
+
+  def run_ring_code(storage)
+    expected_output = "total time for"
+    ring_code = <<BAZ
+-module(ring).
+-compile(export_all).
+
+% spawn N processes
+% M times, send message to proc 1
+% when proc i recv's a message, send it to i+1
+
+% distributed version:
+% global var for master node
+% var with list of nodes
+% if master:
+%   no high-level changes
+% else:
+%   wait for master to give me work
+%   loop until i receive a kill message
+%   which breaks this loop and kills this node
+
+% smaller changes:
+
+start(Name, Next) ->
+  %io:format("creating proc named ~p with next proc named ~p~n", [Name, Next]),
+  register(Name, spawn(fun() -> ring:startMe(Next) end)),
+  Name.
+
+startMe(Next) ->
+  receive
+    stop ->
+      void;
+    {Message, Initiator} ->
+      NextPid = whereis(Next),
+      if
+        is_pid(NextPid) ->
+          %io:format("sending message to next proc, ~p~n", [Next]),
+          NextPid ! {Message, Initiator},
+          startMe(Next);
+        true ->
+          %io:format("no next proc to send message to!~n"),
+          Initiator ! done,
+          startMe(Next)
+      end
+  end.
+
+generateProcs(Num) ->
+  if
+    Num > 0 ->
+      [start(ring:i_to_a(Num), ring:i_to_a(Num-1)) | ring:generateProcs(Num-1)];
+    true ->
+      []
+  end.
+
+stopProcs([H | T]) ->
+  HeadPid = whereis(H),
+  HeadPid ! stop,
+  stopProcs(T);
+stopProcs([]) ->
+  void.
+
+sendMessageToFirst([H | T], Message) ->
+  HeadPid = whereis(H),
+  HeadPid ! {Message, self()},
+  receive
+    done -> void
+  end.
+
+a_to_i(A) ->
+  list_to_integer(atom_to_list(A)).
+
+i_to_a(I) ->
+  list_to_atom(integer_to_list(I)).
+
+times(Num, Fun) ->
+  if Num > 0 ->
+    Fun(),
+    times(Num-1, Fun);
+  true ->
+    void
+  end.
+
+main() ->
+  statistics(wall_clock),
+  NumProcs = 10,
+  NumMessages = 1,
+  Processes = ring:generateProcs(NumProcs),
+  Message = "hello!",
+  times(NumMessages, fun() -> sendMessageToFirst(Processes, Message) end),
+  ring:stopProcs(Processes),
+  {_, TotalTime} = statistics(wall_clock),
+  TimeInMicroseconds = TotalTime * 1000,
+  io:format("total time for N = ~p, M = ~p, is ~p microseconds~n", [NumProcs, NumMessages, TimeInMicroseconds]),
+  exit('baz').
+
+BAZ
+
+    contents = TestHelper.get_random_alphanumeric(1024)
+    folder = "ring-#{TestHelper.get_random_alphanumeric}"
+    source = "ring.erl"
+
+    tmp_folder = "/tmp/#{folder}"
+    FileUtils.mkdir_p(tmp_folder)
+    compiled = "#{tmp_folder}-compiled"
+    compiled_code = "#{compiled}/ring.beam"
+
+    local = "#{tmp_folder}/#{source}"
+    TestHelper.write_file(local, ring_code)
+
+    output = TestHelper.get_output_location(folder, storage)
+
+    compile_erlang_code(tmp_folder, source, compiled)
+    start_erlang_code(compiled_code, output, storage)
+    get_erlang_output(output, expected_output, storage)
+
+    FileUtils.rm_rf(tmp_folder)
+    FileUtils.rm_rf(compiled)
+  end
+
+  def compile_erlang_code(location, main_file, compiled)
+    std_out, std_err = TestHelper.compile_code(location, main_file, compiled)
+
+    make = "HOME=/root erlc ring.erl"
+    msg = "The Erlang Ring code did not compile as expected. It should have " +
+      "compiled with the command [#{make}] instead of [#{std_out}]."
+    assert(std_out.include?(make), msg)
+
+    msg = "The Erlang Ring code did not compile successfully. It reported " +
+      "the following error: #{std_err}"
+    assert_nil(std_err, msg)
+  end
+
+  def start_erlang_code(code_location, output, storage)
+    status = TestHelper.start_job("erlang", code_location, output, storage)
+
+    msg = "Your job was not started successfully. The failure message " +
+      "reported was #{status[:msg]}"
+    assert_equal(status[:result], :success, msg)
+  end
+
+  def get_erlang_output(output, expected, storage)
+    result = TestHelper.get_job_output(output, storage)
+
+    msg = "The Erlang job you ran did not return the expected result. " +
+      "We expected to see [#{expected}] but instead saw [#{result}]"
+    out_contains = result.include?(expected)
+    assert(out_contains, msg)
+  end
+end
+
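TestHelper (added in data/test/test_helper.rb, not shown in this excerpt) hides the actual job submission. As a rough sketch, start_job("erlang", ...) plausibly reduces to a single neptune() call like the ones visible in tc_mapreduce.rb below; the :code and :nodes_to_use keys and the paths here are illustrative assumptions, not the helper's verified signature:

require 'neptune'

# Hypothetical equivalent of TestHelper.start_job("erlang", code, output, storage).
# :code and :nodes_to_use are assumed parameter names; the paths are made up.
status = neptune(:type => "erlang",
                 :code => "/tmp/ring-abc123-compiled/ring.beam",
                 :output => "/ring-abc123",
                 :nodes_to_use => 1)
puts status[:msg] unless status[:result] == :success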
data/test/tc_mapreduce.rb
ADDED
@@ -0,0 +1,207 @@
+
+class TestMapReduce < Test::Unit::TestCase
+  def test_mr_streaming_code
+    STORAGE_TYPES.each { |storage|
+      run_streaming_code(storage)
+    }
+  end
+
+  def run_streaming_code(storage)
+    expected_output = "0: We have 1 processors"
+    input = <<BAZ
+1 32
+33 64
+65 96
+97 128
+BAZ
+
+    map_code = <<BAZ
+#!/usr/local/bin/ruby -w
+# Programmer: Chris Bunch
+# mapper-ruby.rb: Solves part of the EP parallel benchmark via the
+# MapReduce framework as follows:
+#   Input: Takes in ranges of k values to compute over STDIN.
+#   Output: list [l, X_k, Y_k]
+
+A = 5 ** 13
+S = 271828183
+MIN_VAL = 2 ** -46
+MAX_VAL = 2 ** 46
+
+def generate_random(k)
+  xk = (A ** k) * S % MAX_VAL
+  MIN_VAL * xk
+end
+
+def ep(k)
+  k = Integer(k)
+
+  xj = generate_random(k)
+  yj = generate_random(k+1)
+
+  t = xj * xj + yj * yj
+
+  if t <= 1
+    xk = xj * Math.sqrt(-2 * Math.log(t) / t)
+    yk = yj * Math.sqrt(-2 * Math.log(t) / t)
+
+    max = [xk.abs, yk.abs].max
+    l = max.floor
+    puts l.to_s + " " + xk.to_s + " " + yk.to_s
+  end
+end
+
+loop {
+  input = STDIN.gets
+  break if input.nil?
+  start, fin = input.chomp.split
+  start = Integer(start)
+  fin = Integer(fin)
+  current = start
+  loop {
+    ep(current)
+    current = current + 2
+    break if current > fin
+  }
+}
+
+BAZ
+
+    red_code = <<BAZ
+#!/usr/local/bin/ruby -w
+# Programmer: Chris Bunch
+# reducer-ruby.rb: Solves part of the EP parallel benchmark via the
+# MapReduce framework as follows:
+#   Input: list [l, X_k, Y_k]
+#   Output: [l, sum(X_k), sum(Y_k)]
+
+current_l = nil
+
+x_count = 0
+y_count = 0
+
+sum_x = 0.0
+sum_y = 0.0
+
+loop {
+  input = STDIN.gets
+  break if input.nil?
+  l, x, y = input.chomp.split
+  l = Integer(l)
+  x = Float(x)
+  y = Float(y)
+
+  current_l = l if current_l.nil?
+
+  if l != current_l
+    puts "bucket = " + current_l.to_s + ", |x| = " + x_count.to_s + ", |y| = " + y_count.to_s
+    current_l = l
+    x_count = 0
+    y_count = 0
+  end
+
+  sum_x = sum_x + x
+  sum_y = sum_y + y
+
+  abs_x = x.abs
+  abs_y = y.abs
+
+  if abs_x > abs_y
+    x_count = x_count + 1
+  else
+    y_count = y_count + 1
+  end
+}
+
+puts "bucket = " + current_l.to_s + ", |x| = " + x_count.to_s + ", |y| = " + y_count.to_s
+puts "sum x = " + sum_x.to_s + ", sum y = " + sum_y.to_s
+
+BAZ
+
+    contents = TestHelper.get_random_alphanumeric(1024)
+    folder = "ep-#{TestHelper.get_random_alphanumeric}"
+
+    input_name = "input"
+    map_source = "map.rb"
+    red_source = "reduce.rb"
+
+    tmp_folder = "/tmp/#{folder}"
+    FileUtils.mkdir_p(tmp_folder)
+    compiled = "#{tmp_folder}-compiled"
+    compiled_code = "#{compiled}/HelloWorld"
+
+    local_input = "#{tmp_folder}/#{input_name}"
+    local_map = "#{tmp_folder}/#{map_source}"
+    local_red = "#{tmp_folder}/#{red_source}"
+
+    TestHelper.write_file(local_input, input)
+    TestHelper.write_file(local_map, map_code)
+    TestHelper.write_file(local_red, red_code)
+
+    remote_input = TestHelper.get_output_location("#{folder}-input", storage)
+    remote_map = TestHelper.get_output_location("#{folder}-map.rb", storage, notxt=true)
+    remote_red = TestHelper.get_output_location("#{folder}-reduce.rb", storage, notxt=true)
+    remote_output = TestHelper.get_output_location("#{folder}-output", storage)
+
+    put_file_in_storage(local_input, remote_input, storage)
+    put_file_in_storage(local_map, remote_map, storage)
+    put_file_in_storage(local_red, remote_red, storage)
+
+    start_mr_streaming_code(remote_input, remote_output, remote_map, remote_red, storage)
+    get_mr_output(remote_output, expected_output, storage)
+
+    FileUtils.rm_rf(local_input)
+    FileUtils.rm_rf(local_map)
+    FileUtils.rm_rf(local_red)
+  end
+
+  def put_file_in_storage(local, remote, storage)
+    params = {
+      :type => "input",
+      :local => local,
+      :remote => remote
+    }.merge(TestHelper.get_storage_params(storage))
+
+    input_result = neptune(params)
+
+    msg = "We were unable to store a file in the database. We " +
+      "got back this: #{input_result}"
+    assert(input_result, msg)
+  end
+
+  def start_mr_streaming_code(input, output, map, reduce, storage)
+    params = {
+      :type => "mapreduce",
+      :input => input,
+      :output => output,
+      :map => map,
+      :reduce => reduce,
+      :nodes_to_use => 1
+    }.merge(TestHelper.get_storage_params(storage))
+
+    status = nil
+
+    loop {
+      status = neptune(params)
+      if status[:msg] =~ /not enough free nodes/
+        puts status[:msg]
+      else
+        break
+      end
+      sleep(5)
+    }
+
+    msg = "Your job was not started successfully. The failure message " +
+      "reported was #{status[:msg]}"
+    assert_equal(status[:result], :success, msg)
+  end
+
+  def get_mr_output(output, expected, storage)
+    result = TestHelper.get_job_output(output, storage)
+
+    msg = "The MapReduce job you ran did not return the expected result. " +
+      "We expected to see [#{expected}] but instead saw [#{result}]"
+    assert_equal(result, expected, msg)
+  end
+end
+
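Stripped of setup and cleanup, run_streaming_code is four neptune() calls: three "input" jobs to stage the data and scripts, then one "mapreduce" job. A condensed sketch using only parameters that appear in this diff (the paths are made up, and the storage credentials merged in by TestHelper.get_storage_params are omitted):

require 'neptune'

# Stage the input file and both streaming scripts remotely.
neptune(:type => "input", :local => "/tmp/ep/input",     :remote => "/ep-input")
neptune(:type => "input", :local => "/tmp/ep/map.rb",    :remote => "/ep-map.rb")
neptune(:type => "input", :local => "/tmp/ep/reduce.rb", :remote => "/ep-reduce.rb")

# Submit the streaming job over the staged files.
status = neptune(:type => "mapreduce",
                 :input => "/ep-input",
                 :output => "/ep-output",
                 :map => "/ep-map.rb",
                 :reduce => "/ep-reduce.rb",
                 :nodes_to_use => 1)

Note the retry loop in start_mr_streaming_code: while the response message matches /not enough free nodes/, the test re-submits every five seconds, so a submission effectively blocks until the cloud has capacity.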
data/test/tc_mpi.rb
ADDED
@@ -0,0 +1,124 @@
+
+class TestMPI < Test::Unit::TestCase
+  def test_hello_world_code
+    num_procs = [1]
+
+    STORAGE_TYPES.each { |storage|
+      num_procs.each { |p|
+        run_hello_world_code(storage, p)
+      }
+    }
+  end
+
+  def run_hello_world_code(storage, num_procs)
+    expected_output = "0: We have 1 processors"
+    ring_code = <<BAZ
+/*
+  "Hello World" MPI Test Program
+*/
+#include <mpi.h>
+#include <stdio.h>
+#include <string.h>
+
+#define BUFSIZE 128
+#define TAG 0
+
+int main(int argc, char *argv[])
+{
+  char idstr[32];
+  char buff[BUFSIZE];
+  int numprocs;
+  int myid;
+  int i;
+  MPI_Status stat;
+
+  MPI_Init(&argc,&argv); /* all MPI programs start with MPI_Init; all 'N' processes exist thereafter */
+  MPI_Comm_size(MPI_COMM_WORLD,&numprocs); /* find out how big the SPMD world is */
+  MPI_Comm_rank(MPI_COMM_WORLD,&myid); /* and this processes' rank is */
+
+  /* At this point, all programs are running equivalently, the rank distinguishes
+     the roles of the programs in the SPMD model, with rank 0 often used specially... */
+  if(myid == 0)
+  {
+    printf("%d: We have %d processors", myid, numprocs);
+    for(i=1;i<numprocs;i++)
+    {
+      sprintf(buff, "Hello %d! ", i);
+      MPI_Send(buff, BUFSIZE, MPI_CHAR, i, TAG, MPI_COMM_WORLD);
+    }
+    for(i=1;i<numprocs;i++)
+    {
+      MPI_Recv(buff, BUFSIZE, MPI_CHAR, i, TAG, MPI_COMM_WORLD, &stat);
+      printf("%d: %s", myid, buff);
+    }
+  }
+  else
+  {
+    /* receive from rank 0: */
+    MPI_Recv(buff, BUFSIZE, MPI_CHAR, 0, TAG, MPI_COMM_WORLD, &stat);
+    sprintf(idstr, "Processor %d ", myid);
+    strncat(buff, idstr, BUFSIZE-1);
+    strncat(buff, "reporting for duty", BUFSIZE-1);
+    /* send to rank 0: */
+    MPI_Send(buff, BUFSIZE, MPI_CHAR, 0, TAG, MPI_COMM_WORLD);
+  }
+
+  MPI_Finalize(); /* MPI Programs end with MPI Finalize; this is a weak synchronization point */
+  return 0;
+}
+
+BAZ
+
+    contents = TestHelper.get_random_alphanumeric(1024)
+    folder = "hello-world-#{TestHelper.get_random_alphanumeric}"
+    source = "HelloWorld.c"
+
+    tmp_folder = "/tmp/#{folder}"
+    FileUtils.mkdir_p(tmp_folder)
+    compiled = "#{tmp_folder}-compiled"
+    compiled_code = "#{compiled}/HelloWorld"
+
+    local = "#{tmp_folder}/#{source}"
+    TestHelper.write_file(local, ring_code)
+
+    output = TestHelper.get_output_location(folder, storage)
+
+    compile_mpi_code(tmp_folder, source, compiled)
+    start_mpi_code(compiled_code, num_procs, output, storage)
+    get_mpi_output(output, expected_output, storage)
+
+    FileUtils.rm_rf(tmp_folder)
+    FileUtils.rm_rf(compiled)
+  end
+
+  def compile_mpi_code(location, main_file, compiled)
+    std_out, std_err = TestHelper.compile_code(location, main_file, compiled)
+
+    make = "mpicc HelloWorld.c -o HelloWorld -Wall"
+    msg = "The MPI code did not compile as expected. It should have " +
+      "compiled with the command [#{make}] instead of [#{std_out}]."
+    assert_equal(std_out, make, msg)
+
+    msg = "The MPI code did not compile successfully. It reported " +
+      "the following error: #{std_err}"
+    assert_nil(std_err, msg)
+  end
+
+  def start_mpi_code(code_location, num_procs, output, storage)
+    params = { :procs_to_use => num_procs }
+    status = TestHelper.start_job("mpi", code_location, output, storage, params)
+
+    msg = "Your job was not started successfully. The failure message " +
+      "reported was #{status[:msg]}"
+    assert_equal(status[:result], :success, msg)
+  end
+
+  def get_mpi_output(output, expected, storage)
+    result = TestHelper.get_job_output(output, storage)
+
+    msg = "The MPI job you ran did not return the expected result. " +
+      "We expected to see [#{expected}] but instead saw [#{result}]"
+    assert_equal(result, expected, msg)
+  end
+end
+
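The MPI test follows the same compile/start/verify shape as the Erlang one, with :procs_to_use threaded through as an extra parameter. A sketch of the submission that start_job("mpi", ...) presumably makes (again, :code and :nodes_to_use are assumed names and every path is illustrative):

require 'neptune'

# Hypothetical expansion of TestHelper.start_job("mpi", code, output, storage, params).
status = neptune(:type => "mpi",
                 :code => "/tmp/hello-world-abc123-compiled/HelloWorld",
                 :output => "/hello-world-abc123",
                 :nodes_to_use => 1,    # assumed; only :procs_to_use appears above
                 :procs_to_use => 1)
puts status[:msg] unless status[:result] == :success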