neptune 0.1.1 → 0.1.2

Files changed (103)
  1. data/README +7 -4
  2. data/doc/AppControllerClient.html +12 -4
  3. data/doc/CommonFunctions.html +55 -42
  4. data/doc/Kernel.html +187 -0
  5. data/doc/LICENSE.html +2 -0
  6. data/doc/Object.html +488 -198
  7. data/doc/README.html +26 -5
  8. data/doc/bin/neptune.html +1 -1
  9. data/doc/created.rid +6 -6
  10. data/doc/index.html +20 -2
  11. data/doc/lib/app_controller_client_rb.html +2 -2
  12. data/doc/lib/common_functions_rb.html +2 -2
  13. data/doc/lib/neptune_rb.html +3 -1
  14. data/lib/app_controller_client.rb +2 -2
  15. data/lib/common_functions.rb +50 -24
  16. data/lib/neptune.rb +224 -159
  17. data/samples/appscale/add_appserver.rb +10 -0
  18. data/samples/appscale/add_database.rb +9 -0
  19. data/samples/appscale/add_loadbalancer.rb +9 -0
  20. data/samples/appscale/add_slave.rb +9 -0
  21. data/samples/c/compile_helloworld.rb +10 -0
  22. data/samples/c/helloworld/helloworld.c +6 -0
  23. data/samples/erlang/compile_erlang_ring.rb +10 -0
  24. data/samples/erlang/get_erlang_output.rb +8 -0
  25. data/samples/erlang/ring/Makefile +3 -0
  26. data/samples/erlang/ring/ring.erl +90 -0
  27. data/samples/erlang/run_erlang_ring.rb +6 -0
  28. data/samples/go/compile_hello.rb +10 -0
  29. data/samples/go/get_hello_output.rb +6 -0
  30. data/samples/go/hello/hello.go +8 -0
  31. data/samples/go/put_input.rb +8 -0
  32. data/samples/go/run_hello.rb +9 -0
  33. data/samples/mapreduce/expected-output.txt +7078 -0
  34. data/samples/mapreduce/get_mapreduce_output.rb +4 -0
  35. data/samples/mapreduce/hadoop-0.20.0-examples.jar +0 -0
  36. data/samples/mapreduce/input-10 +64 -0
  37. data/samples/mapreduce/input-30 +64 -0
  38. data/samples/mapreduce/input-7 +4 -0
  39. data/samples/mapreduce/map.rb +48 -0
  40. data/samples/mapreduce/reduce.rb +48 -0
  41. data/samples/mapreduce/run_java_mr.rb +14 -0
  42. data/samples/mapreduce/run_mapreduce.rb +13 -0
  43. data/samples/mapreduce/the-end-of-time.txt +11256 -0
  44. data/samples/mpi/Makefile +22 -0
  45. data/samples/mpi/MpiQueen +0 -0
  46. data/samples/mpi/compile_mpi_ring.rb +10 -0
  47. data/samples/mpi/compile_x10_nqueens.rb +8 -0
  48. data/samples/mpi/cpi +0 -0
  49. data/samples/mpi/get_mpi_output.rb +5 -0
  50. data/samples/mpi/get_ring_output.rb +5 -0
  51. data/samples/mpi/hw2.c +205 -0
  52. data/samples/mpi/hw2harness.c +84 -0
  53. data/samples/mpi/hw2harness.h +45 -0
  54. data/samples/mpi/powermethod +0 -0
  55. data/samples/mpi/ring/Makefile +2 -0
  56. data/samples/mpi/ring/Ring.c +76 -0
  57. data/samples/mpi/run_mpi_cpi.rb +10 -0
  58. data/samples/mpi/run_mpi_nqueens.np +6 -0
  59. data/samples/mpi/run_mpi_powermethod.rb +8 -0
  60. data/samples/mpi/run_mpi_ring.rb +12 -0
  61. data/samples/r/compile_hello.rb +10 -0
  62. data/samples/r/get_hello_output.rb +6 -0
  63. data/samples/r/hello/hello.r +1 -0
  64. data/samples/r/put_input.rb +8 -0
  65. data/samples/r/run_hello.rb +9 -0
  66. data/samples/upc/compile_upc_helloworld.rb +10 -0
  67. data/samples/upc/compile_upc_ring.rb +11 -0
  68. data/samples/upc/get_mpi_output.rb +8 -0
  69. data/samples/upc/helloworld/HelloWorld.c +9 -0
  70. data/samples/upc/helloworld/Makefile +3 -0
  71. data/samples/upc/ring/Makefile +3 -0
  72. data/samples/upc/ring/Ring.c +116 -0
  73. data/samples/upc/run_upc_helloworld.rb +12 -0
  74. data/samples/upc/run_upc_ring.rb +12 -0
  75. data/samples/x10/MyPowerMethod +0 -0
  76. data/samples/x10/MyPowerMethod.x10 +236 -0
  77. data/samples/x10/NQueensDist +0 -0
  78. data/samples/x10/NQueensDist.x10 +112 -0
  79. data/samples/x10/compile_x10_nqueens.rb +7 -0
  80. data/samples/x10/compile_x10_ring.rb +12 -0
  81. data/samples/x10/get_x10_output.rb +8 -0
  82. data/samples/x10/ring/Makefile +3 -0
  83. data/samples/x10/ring/Ring.x10 +28 -0
  84. data/samples/x10/ring/RingOld.x10 +68 -0
  85. data/samples/x10/run_x10_nqueens.rb +6 -0
  86. data/samples/x10/run_x10_powermethod.rb +7 -0
  87. data/samples/x10/run_x10_ring.rb +6 -0
  88. data/test/{tc_c.rb → integration/tc_c.rb} +2 -2
  89. data/test/{tc_dfsp.rb → integration/tc_dfsp.rb} +0 -0
  90. data/test/{tc_dwssa.rb → integration/tc_dwssa.rb} +0 -0
  91. data/test/{tc_erlang.rb → integration/tc_erlang.rb} +0 -0
  92. data/test/{tc_mapreduce.rb → integration/tc_mapreduce.rb} +0 -0
  93. data/test/{tc_mpi.rb → integration/tc_mpi.rb} +0 -0
  94. data/test/{tc_storage.rb → integration/tc_storage.rb} +0 -0
  95. data/test/{tc_upc.rb → integration/tc_upc.rb} +0 -0
  96. data/test/{tc_x10.rb → integration/tc_x10.rb} +0 -0
  97. data/test/{test_helper.rb → integration/test_helper.rb} +0 -0
  98. data/test/{ts_neptune.rb → integration/ts_neptune.rb} +2 -2
  99. data/test/unit/test_app_controller_client.rb +106 -0
  100. data/test/unit/test_common_functions.rb +106 -0
  101. data/test/unit/test_neptune.rb +208 -0
  102. data/test/unit/ts_all.rb +6 -0
  103. metadata +91 -15
data/samples/mpi/Makefile ADDED
@@ -0,0 +1,22 @@
+ CC = mpicc
+ CCFLAGS =
+ VLIBS = -L/usr/local/apps/vampirtrace/lib -lVT -lnsl -lm -lpthread
+ LIBS = -lm
+ OBJS = hw2.o hw2harness.o
+
+
+ all: hw2
+
+ hw2: $(OBJS)
+ 	$(CC) $(CCFLAGS) -o hw2 $(OBJS) $(LIBS)
+
+ hw2_v4: $(OBJS)
+ 	$(CC) $(CCFLAGS) -o hw2_v4 $(OBJS) $(VLIBS)
+
+ hw2_v16: $(OBJS)
+ 	$(CC) $(CCFLAGS) -o hw2_v16 $(OBJS) $(VLIBS)
+
+ clean:
+ 	-rm $(OBJS)
+
+ .c.o:; $(CC) $(CCFLAGS) -c $<
data/samples/mpi/MpiQueen ADDED
Binary file
data/samples/mpi/compile_mpi_ring.rb ADDED
@@ -0,0 +1,10 @@
+ result = neptune :type => "compile",
+   #@keyname = "cluster",
+   :code => "ring2",
+   :main => "Ring.c",
+   :output => "/baz",
+   #@lang = "x10",
+   :copy_to => "ring-compiled"
+
+ puts "out = #{result[:out]}"
+ puts "err = #{result[:err]}"
data/samples/mpi/compile_x10_nqueens.rb ADDED
@@ -0,0 +1,8 @@
+ job "compile" do
+   #@keyname = "cluster"
+   @code = "NQueensDist.x10"
+   @output = "/baz"
+   @lang = "x10"
+   @copy_to = "NQueensCompiled"
+ end
+
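The two compile samples above show Neptune's two invocation styles: compile_mpi_ring.rb passes a flat keyword hash straight to the neptune method, while compile_x10_nqueens.rb uses the block-based job DSL and sets the same fields as instance variables (@code, @output, @lang, @copy_to). A minimal sketch of a hash-style equivalent of the X10 job, assuming the field names map one-to-one between the two styles (that mapping is an assumption, not something this diff confirms):

  # Hash-style equivalent of the job "compile" block above; the
  # one-to-one mapping of @code to :code etc. is assumed, not documented here.
  result = neptune :type => "compile",
                   :code => "NQueensDist.x10",
                   :output => "/baz",
                   :copy_to => "NQueensCompiled"
  puts "out = #{result[:out]}"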
data/samples/mpi/cpi ADDED
Binary file
data/samples/mpi/get_mpi_output.rb ADDED
@@ -0,0 +1,5 @@
+
+ neptune :type => "output",
+   #@keyname = "neptune"
+   :output => "/bazoutput"
+
data/samples/mpi/get_ring_output.rb ADDED
@@ -0,0 +1,5 @@
+ output = neptune :type => "output",
+   :keyname => "booscale1",
+   :output => "/baz/output"
+
+ puts output
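get_mpi_output.rb and get_ring_output.rb each issue a single :type => "output" request and print whatever comes back; if the job is still running, one fetch may return nothing useful. A hedged polling sketch built around the same call; the empty-result test and the sleep interval are assumptions about this release's behavior, not documented API:

  # Hypothetical polling loop around the output call shown above;
  # assumes an unfinished job yields a nil or empty result.
  output = nil
  loop do
    output = neptune :type => "output",
      :keyname => "booscale1",
      :output => "/baz/output"
    break unless output.nil? || output.to_s.empty?
    sleep 30  # assumed interval
  end
  puts output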
data/samples/mpi/hw2.c ADDED
@@ -0,0 +1,205 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <math.h>
+ #include "mpi.h"
+ #include "hw2harness.h"
+
+
+ #define NUM_ITERATIONS 10
+ #define FROM_MASTER 0
+
+ #define MASTER(rank) (rank == 0)
+ #define SLAVE(rank) (!MASTER(rank))
+
+ double** A;
+ double* x;
+ int n, dim;
+
+ //-----------------------------------------------------------------------
+ void getNFromUser() {
+   //printf("Please enter n, so that we can create an n x n matrix: ");
+   //fflush(stdout);
+   //scanf("%d", &n);
+
+   n = 3072;
+
+   MPI_Bcast(&n, 1, MPI_INT, FROM_MASTER, MPI_COMM_WORLD);
+
+   return;
+ }
+
+ //-----------------------------------------------------------------------
+ void receiveN() {
+   MPI_Bcast(&n, 1, MPI_INT, FROM_MASTER, MPI_COMM_WORLD);
+   return;
+ }
+
+ //-----------------------------------------------------------------------
+ int generateMatrix(int p, int rank) {
+   MPI_Barrier(MPI_COMM_WORLD);
+
+   if (MASTER(rank)) {
+     getNFromUser();
+   } else {
+     receiveN();
+   }
+
+   MPI_Barrier(MPI_COMM_WORLD);
+
+   if (n % p != 0) {
+     if (MASTER(rank)) {
+       printf("p doesn't divide n evenly. Please enter n and try again.\n");
+     }
+     return(1);
+   }
+
+   int starting = (n/p) * rank + 1;
+   int ending = (n/p) * (rank + 1);
+   dim = ending - starting + 1;
+   //printf("Proc %d: Generating %d rows, %d through %d\n", rank, dim, starting, ending);
+
+   double* A_tmp = cs240_generateMatrix(n, starting-1, dim);
+   int cnt = 0;
+
+
+   A = malloc(dim * sizeof(double*));
+   int outer = 0;
+   int inner = 0;
+   for (outer = 0; outer < dim; outer++) {
+     A[outer] = malloc(n * sizeof(double));
+     for (inner = 0; inner < n; inner++) {
+       //A[outer][inner] = 1.0f;
+       A[outer][inner] = A_tmp[cnt];
+       cnt++;
+     }
+   }
+
+   x = malloc(n * sizeof(double));
+   int index = 0;
+   for (index = 0; index < n; index++) {
+     x[index] = 1;
+   }
+
+   return(0);
+ }
+
+ //-----------------------------------------------------------------------
+ double norm() {
+   double theNorm = 0;
+
+   int index = 0;
+
+   for (index = 0; index < n; index++) {
+     theNorm += (x[index] * x[index]);
+   }
+
+   theNorm = sqrt(theNorm);
+
+   return theNorm;
+ }
+
+ //-----------------------------------------------------------------------
+ void matVec(int rank) {
+   int index = 0;
+   MPI_Bcast(x, n, MPI_DOUBLE, FROM_MASTER, MPI_COMM_WORLD);
+   MPI_Barrier(MPI_COMM_WORLD);
+
+   double* result = malloc(dim * sizeof(double));
+   int outer = 0;
+   for (outer = 0; outer < dim; outer++) {
+     double sum = 0;
+     int inner = 0;
+     for (inner = 0; inner < n; inner++) {
+       sum += (A[outer][inner] * x[inner]);
+     }
+     result[outer] = sum;
+   }
+
+   MPI_Barrier(MPI_COMM_WORLD);
+   MPI_Gather(result, dim, MPI_DOUBLE, x, dim, MPI_DOUBLE, FROM_MASTER, MPI_COMM_WORLD);
+   MPI_Barrier(MPI_COMM_WORLD);
+   free(result);
+   return;
+ }
+
+ //-----------------------------------------------------------------------
+ double powerMethod(int rank) {
+   MPI_Barrier(MPI_COMM_WORLD);
+   double xNorm = 0;
+
+   int iteration = 0;
+   for (iteration = 0; iteration < NUM_ITERATIONS; iteration++) {
+     if (MASTER(rank)) {
+       xNorm = norm();
+       //printf("At iteration %d, the norm of x is %f\n", iteration, xNorm);
+
+       int index = 0;
+       for (index = 0; index < n; index++) {
+         x[index] = x[index] / xNorm;
+         //printf("x[%d] = %f\n", index, x[index]);
+       }
+     }
+
+     MPI_Barrier(MPI_COMM_WORLD);
+     matVec(rank);
+     MPI_Barrier(MPI_COMM_WORLD);
+   }
+
+   MPI_Barrier(MPI_COMM_WORLD);
+   return xNorm;
+ }
+
+ //-----------------------------------------------------------------------
+
+ int main(int argc, char *argv[])
+ {
+   int rank, size;
+   double start_time, end_time;
+   MPI_Init(&argc, &argv);
+
+   MPI_Comm_size(MPI_COMM_WORLD, &size); // p
+   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+   MPI_Barrier(MPI_COMM_WORLD);
+   int retval = generateMatrix(size, rank);
+   if (retval != 0) {
+     MPI_Finalize();
+     return retval;
+   }
+   MPI_Barrier(MPI_COMM_WORLD);
+   // start timing
+   if (MASTER(rank)) {
+     start_time = MPI_Wtime();
+   }
+   double spectralRadius = powerMethod(rank);
+   if (MASTER(rank)) {
+     end_time = MPI_Wtime();
+   }
+   // end timing
+
+   if (MASTER(rank)) {
+     printf("The spectral radius is %f\n", spectralRadius);
+     printf("It took %f seconds\n", end_time-start_time);
+
+     /*
+     int index = 0;
+     for (index = 0; index < n; index++) {
+       printf("%f ", x[index]);
+     }
+     printf("\nsize of n is %d\n", n);
+     */
+     // checking
+     if (cs240_verify(x, n, end_time-start_time) > 0) {
+       printf("yay, we win!\n");
+     } else {
+       printf("Boo, we lose... again\n");
+     }
+   }
+
+
+   //printf("calling MPI_Finalize()\n");
+   MPI_Finalize();
+   return(0);
+ }
+
+ //-----------------------------------------------------------------------
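A note on what the harness expects from hw2.c's power method: cs240_generateMatrix (below) fills each row slice with a single diagonal entry equal to n, so the full matrix is A = n*I. Starting from the all-ones vector, one normalize-then-multiply step gives components n * (1/sqrt(n)) = sqrt(n), every later iteration reproduces that same vector, and the reported spectral radius converges to n, the dominant eigenvalue of n*I. That is exactly the x[i] == sqrt(n) check in cs240_verify. A tiny standalone Ruby check of the fixed point (illustrative only, not part of the gem):

  # For A = n*I: normalize the all-ones vector, multiply by A,
  # and every entry lands on sqrt(n).
  n = 3072
  x = Array.new(n, 1.0)
  norm = Math.sqrt(x.inject(0.0) { |s, v| s + v * v })  # = sqrt(n)
  x = x.map { |v| n * (v / norm) }                      # A * (x / ||x||)
  puts((x[0] - Math.sqrt(n)).abs < 1e-5)                # => true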
data/samples/mpi/hw2harness.c ADDED
@@ -0,0 +1,84 @@
+ /*
+  * hw2harness.c
+  *
+  * I WILL OVERWRITE YOUR COPY OF THIS FILE WITH MY OWN. ANY CHANGES YOU MAKE WILL NOT BE VISIBLE DURING GRADING.
+  */
+
+ #include <stdlib.h>
+ #include <math.h>
+ #include "hw2harness.h"
+
+ /*
+   Generates a slice of matrix A.
+   In grading I may use several different versions of this function to test your code.
+
+   arguments:
+     n = the number of columns (and rows) in A
+     startrow = the row to start on
+     numrows = the number of rows to generate
+
+   return value:
+     a slice of matrix A in row major order (rows and columns 1-based):
+       A[index] => A(row, column)
+       A[0] => A(1, 1)
+       A[1] => A(1, 2)
+       A[n] => A(2, 1)
+       A[2*n+3] => A(3, 4)
+       etc.
+
+   The reason we don't do a multi-dimensional array is so that multi-row transfers using MPI can be
+   accomplished in a single MPI call.
+ */
+ double* cs240_generateMatrix(int n, int startrow, int numrows) {
+   double* A;
+   int i;
+   int diag;
+
+   A = (double*)calloc(n*numrows, sizeof(double));
+
+   for (i = 0; i < numrows; i++) {
+     diag = startrow + i;
+
+     A[i*n + diag] = n;
+   }
+
+   return A;
+ }
+
+ /*
+   Call this function at the end of your program. It verifies that the answer you got is correct
+   and allows me to have timing results in a convenient format.
+
+   arguments:
+     x = the answer your program came up with
+     n = the number of rows and columns of A, and the size of x
+     elapsedTime = the time it took to run your power method. Use MPI_Wtime() to get an initial time, then again to get a finishing time.
+       elapsedTime = final - initial.
+       Please only time your power method, not the entire program.
+
+   returns:
+     1 if the vector is correct, 0 otherwise.
+ */
+ int cs240_verify(double* x, int n, double elapsedTime) {
+   int i;
+   double answer = sqrt(n);
+   double diff;
+   int correct = 1;
+
+   for (i = 0; i < n; i++) {
+     // make sure each element of the vector x equals sqrt(n).
+     // to allow for errors in floating point calculations, close is good enough.
+     diff = x[i] - answer;
+     if (diff < 0)
+       diff = -diff;
+
+     if (diff > 0.00001) {
+       correct = 0;
+       break;
+     }
+   }
+
+   // I will be using the elapsedTime argument here.
+
+   return correct;
+ }
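The row-major layout documented above is what lets a multi-row slice travel in a single MPI call: element (row, col) of an n-column matrix sits at one flat offset. A one-line Ruby sketch of that offset calculation (hypothetical helper, 0-based indices; the comment block above numbers rows and columns from 1):

  # flat_index(2, 3, n) == 2*n + 3, matching the A[2*n+3] => A(3, 4)
  # example in the harness comments (which are 1-based).
  def flat_index(row, col, n)
    row * n + col
  end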
data/samples/mpi/hw2harness.h ADDED
@@ -0,0 +1,45 @@
+ /*
+  * hw2harness.h
+  *
+  * I WILL OVERWRITE YOUR COPY OF THIS FILE WITH MY OWN. ANY CHANGES YOU MAKE WILL NOT BE VISIBLE DURING GRADING.
+  *
+  */
+
+ /*
+   Generates a slice of matrix A.
+   In grading I may use several different versions of this function to test your code.
+
+   arguments:
+     n = the number of columns (and rows) in A
+     startrow = the row to start on
+     numrows = the number of rows to generate
+
+   return value:
+     a slice of matrix A in row major order (rows and columns 1-based):
+       A[index] => A(row, column)
+       A[0] => A(1, 1)
+       A[1] => A(1, 2)
+       A[n] => A(2, 1)
+       A[2*n+3] => A(3, 4)
+       etc.
+
+   The reason we don't do a multi-dimensional array is so that multi-row transfers using MPI can be
+   accomplished in a single MPI call.
+ */
+ double* cs240_generateMatrix(int n, int startrow, int numrows);
+
+ /*
+   Call this function at the end of your program. It verifies that the answer you got is correct
+   and allows me to have timing results in a convenient format.
+
+   arguments:
+     x = the answer your program came up with
+     n = the number of rows and columns of A, and the size of x
+     elapsedTime = the time it took to run your power method. Use MPI_Wtime() to get an initial time, then again to get a finishing time.
+       elapsedTime = final - initial.
+       Please only time your power method, not the entire program.
+
+   returns:
+     1 if the vector is correct, 0 otherwise.
+ */
+ int cs240_verify(double* x, int n, double elapsedTime);
data/samples/mpi/powermethod ADDED
Binary file
data/samples/mpi/ring/Makefile ADDED
@@ -0,0 +1,2 @@
+ all:
+ 	mpicc Ring.c -o Ring -Wall
data/samples/mpi/ring/Ring.c ADDED
@@ -0,0 +1,76 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <math.h>
+ #include "mpi.h"
+
+ #define NUM_MESSAGES 10
+ #define FROM_MASTER 0
+
+ #define MASTER(rank) (rank == 0)
+ #define SLAVE(rank) (!MASTER(rank))
+
+ //-----------------------------------------------------------------------
+
+ int main(int argc, char *argv[])
+ {
+   int rank, size;
+   double start_time = 0;
+   double end_time = 0;
+   MPI_Status status;
+
+   MPI_Init(&argc, &argv);
+
+   MPI_Comm_size(MPI_COMM_WORLD, &size); // p
+   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+   MPI_Barrier(MPI_COMM_WORLD);
+
+   // start timing
+   if (MASTER(rank)) {
+     start_time = MPI_Wtime();
+   }
+
+   char msg[7] = "hello!";  // writable buffer (with NUL) so MPI_Recv can store into it
+
+   MPI_Barrier(MPI_COMM_WORLD);
+
+   int index = 0;
+   for (index = 0; index < NUM_MESSAGES; index++) {
+     if (MASTER(rank)) {
+       //printf("master is sending message to node 1\n");
+       MPI_Send(msg, 7, MPI_CHAR, 1, FROM_MASTER, MPI_COMM_WORLD);
+
+       //printf("master is waiting for message from node %d\n", size - 1);
+       MPI_Recv(msg, 7, MPI_CHAR, size - 1, FROM_MASTER, MPI_COMM_WORLD, &status);
+     } else {
+       MPI_Recv(msg, 7, MPI_CHAR, rank-1, FROM_MASTER, MPI_COMM_WORLD, &status);
+
+       if (rank + 1 == size) {
+         //printf("node %d is sending a message to node 0\n", rank);
+         MPI_Send(msg, 7, MPI_CHAR, 0, FROM_MASTER, MPI_COMM_WORLD);
+       } else {
+         //printf("node %d is sending a message to node %d\n", rank, rank + 1);
+         MPI_Send(msg, 7, MPI_CHAR, rank + 1, FROM_MASTER, MPI_COMM_WORLD);
+       }
+     }
+
+     MPI_Barrier(MPI_COMM_WORLD);
+   }
+
+   MPI_Barrier(MPI_COMM_WORLD);
+   if (MASTER(rank)) {
+     end_time = MPI_Wtime();
+   }
+   // end timing
+
+   if (MASTER(rank)) {
+     printf("All done sending %d messages between %d nodes!\n", NUM_MESSAGES, size);
+     printf("It took %f seconds\n", end_time-start_time);
+   }
+
+   //printf("calling MPI_Finalize()\n");
+   MPI_Finalize();
+   return(0);
+ }
+
+ //-----------------------------------------------------------------------
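Ring.c completes the MPI sample set: compile_mpi_ring.rb builds it and get_ring_output.rb fetches what it prints. A hedged end-to-end sketch of that pipeline; the :type => "mpi" run step and its :code path are assumptions patterned on the samples above (the actual run_mpi_ring.rb shipped in this release is listed in the files changed but not reproduced in this diff):

  # Assumed workflow: compile the ring, run it, then fetch its output.
  neptune :type => "compile",
    :code => "ring2",
    :main => "Ring.c",
    :output => "/baz",
    :copy_to => "ring-compiled"

  neptune :type => "mpi",           # assumed job type for an MPI run
    :code => "ring-compiled",       # assumed: the artifact named by :copy_to
    :output => "/baz/output"

  puts(neptune :type => "output",
    :keyname => "booscale1",
    :output => "/baz/output")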