ruby-mpi 0.1.0

data/.document ADDED
@@ -0,0 +1,5 @@
+ lib/**/*.rb
+ bin/*
+ -
+ features/**/*.feature
+ LICENSE.txt
data/.rspec ADDED
@@ -0,0 +1 @@
+ --color
data/Gemfile ADDED
@@ -0,0 +1,14 @@
+ source "http://rubygems.org"
+ # Add dependencies required to use your gem here.
+ # Example:
+ # gem "activesupport", ">= 2.3.5"
+
+ # Add dependencies to develop your gem here.
+ # Include everything needed to run rake, tests, features, etc.
+ group :development do
+   gem "rspec", "~> 2.3.0"
+   gem "bundler", "~> 1.0.0"
+   gem "jeweler", "~> 1.5.2"
+   gem "rcov", ">= 0"
+   gem "narray", ">= 0"
+ end
data/Gemfile.lock ADDED
@@ -0,0 +1,30 @@
+ GEM
+   remote: http://rubygems.org/
+   specs:
+     diff-lcs (1.1.2)
+     git (1.2.5)
+     jeweler (1.5.2)
+       bundler (~> 1.0.0)
+       git (>= 1.2.5)
+       rake
+     narray (0.5.9.9)
+     rake (0.8.7)
+     rcov (0.9.9)
+     rspec (2.3.0)
+       rspec-core (~> 2.3.0)
+       rspec-expectations (~> 2.3.0)
+       rspec-mocks (~> 2.3.0)
+     rspec-core (2.3.1)
+     rspec-expectations (2.3.0)
+       diff-lcs (~> 1.1.2)
+     rspec-mocks (2.3.0)
+
+ PLATFORMS
+   ruby
+
+ DEPENDENCIES
+   bundler (~> 1.0.0)
+   jeweler (~> 1.5.2)
+   narray
+   rcov
+   rspec (~> 2.3.0)
data/LICENSE.txt ADDED
@@ -0,0 +1,20 @@
+ Copyright (c) 2011 Seiya Nishizawa
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc ADDED
@@ -0,0 +1,19 @@
+ = ruby-mpi
+
+ Ruby-MPI is a Ruby binding of the Message Passing Interface (MPI), an API specification that allows processes to communicate with one another by sending and receiving messages.
+
+ == Contributing to ruby-mpi
+
+ * Check out the latest master to make sure the feature hasn't been implemented or the bug hasn't been fixed yet
+ * Check out the issue tracker to make sure someone hasn't already requested and/or contributed it
+ * Fork the project
+ * Start a feature/bugfix branch
+ * Commit and push until you are happy with your contribution
+ * Make sure to add tests for it. This is important so I don't break it in a future version unintentionally.
+ * Please try not to mess with the Rakefile, version, or history. If you want to have your own version, or it is otherwise necessary, that is fine, but please isolate the change in its own commit so I can cherry-pick around it.
+
+ == Copyright
+
+ Copyright (c) 2011 Seiya Nishizawa. See LICENSE.txt for
+ further details.
+
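For a concrete picture of what the binding described above looks like in use, here is a minimal usage sketch based on samples/hello.rb from this gem. It assumes the C extension has been built and that the script is started under an MPI launcher such as mpiexec (for example: mpiexec -n 4 ruby hello.rb; the exact launcher name depends on the MPI implementation):

  require "mpi"

  MPI.Init

  world = MPI::Comm::WORLD
  if world.rank == 0
    # Rank 0 collects one greeting from every other rank.
    (world.size - 1).times do |i|
      buf = " " * 100            # pre-allocated receive buffer (String)
      world.Recv(buf, i + 1, 0)  # arguments: (buffer, source, tag)
      puts buf.strip
    end
  else
    world.Send("Hello from #{world.rank}", 0, 0)  # (message, dest, tag)
  end

  MPI.Finalize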
data/Rakefile ADDED
@@ -0,0 +1,57 @@
+ require 'rubygems'
+ require 'bundler'
+ require "rake/clean"
+ begin
+   Bundler.setup(:default, :development)
+ rescue Bundler::BundlerError => e
+   $stderr.puts e.message
+   $stderr.puts "Run `bundle install` to install missing gems"
+   exit e.status_code
+ end
+ require 'rake'
+
+ require 'jeweler'
+ Jeweler::Tasks.new do |gem|
+   # gem is a Gem::Specification... see http://docs.rubygems.org/read/chapter/20 for more options
+   gem.name = "ruby-mpi"
+   gem.homepage = "http://github.com/seiya/ruby-mpi"
+   gem.license = "MIT"
+   gem.summary = "A ruby binding of MPI"
+   gem.description = "A ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages."
+   gem.email = "seiya@gfd-dennou.org"
+   gem.authors = ["Seiya Nishizawa"]
+   # Include your dependencies below. Runtime dependencies are required when using your gem,
+   # and development dependencies are only needed for development (ie running rake tasks, tests, etc)
+   # gem.add_runtime_dependency 'jabber4r', '> 0.1'
+   # gem.add_development_dependency 'rspec', '> 1.2.3'
+ end
+ Jeweler::RubygemsDotOrgTasks.new
+
+ require 'rspec/core'
+ require 'rspec/core/rake_task'
+ RSpec::Core::RakeTask.new(:spec) do |spec|
+   spec.pattern = FileList['spec/**/*_spec.rb']
+ end
+
+ RSpec::Core::RakeTask.new(:rcov) do |spec|
+   spec.pattern = 'spec/**/*_spec.rb'
+   spec.rcov = true
+ end
+
+ task :default => :spec
+
+ require 'rake/rdoctask'
+ Rake::RDocTask.new do |rdoc|
+   version = File.exist?('VERSION') ? File.read('VERSION') : ""
+
+   rdoc.rdoc_dir = 'rdoc'
+   rdoc.title = "ruby-mpi #{version}"
+   rdoc.rdoc_files.include('README*')
+   rdoc.rdoc_files.include('lib/**/*.rb')
+ end
+
+
+ CLEAN.include("ext/mpi/*.o")
+ CLEAN.include("ext/mpi/mkmf.log")
+ CLOBBER.include("ext/mpi/mpi.so")
+ CLOBBER.include("ext/mpi/Makefile")
data/VERSION ADDED
@@ -0,0 +1 @@
+ 0.1.0
data/ext/mpi/extconf.rb ADDED
@@ -0,0 +1,15 @@
+ require "mkmf"
+
+ CONFIG['CC'] = "mpicc"
+ gem_path = nil
+ begin
+   require "rubygems"
+   if (spec = Gem.source_index.find_name("narray")).any?
+     gem_path = spec.last.full_gem_path
+   end
+ rescue LoadError
+   dir_config("narray", Config::CONFIG["sitearchdir"])
+ end
+ find_header("narray.h", gem_path)
+
+ create_makefile("mpi")
data/ext/mpi/mpi.c ADDED
@@ -0,0 +1,656 @@
1
+ #include <stdio.h>
2
+ #include <string.h>
3
+ #include <stdbool.h>
4
+ #include "ruby.h"
5
+ #include "narray.h"
6
+ #include "mpi.h"
7
+
8
+
9
+ #define OBJ2C(rb_obj, len, buffer, typ) \
10
+ {\
11
+ if (TYPE(rb_obj) == T_STRING) {\
12
+ len = RSTRING_LEN(rb_obj);\
13
+ buffer = (void*)StringValuePtr(rb_obj);\
14
+ typ = MPI_CHAR;\
15
+ } else if (IsNArray(rb_obj)) { \
16
+ struct NARRAY *a;\
17
+ GetNArray(rb_obj, a);\
18
+ buffer = (void*)(a->ptr);\
19
+ len = a->total;\
20
+ switch (a->type) {\
21
+ case NA_BYTE:\
22
+ typ = MPI_BYTE;\
23
+ break;\
24
+ case NA_SINT:\
25
+ typ = MPI_SHORT;\
26
+ break;\
27
+ case NA_LINT:\
28
+ typ = MPI_LONG;\
29
+ break;\
30
+ case NA_SFLOAT:\
31
+ typ = MPI_FLOAT;\
32
+ break;\
33
+ case NA_DFLOAT:\
34
+ typ = MPI_DOUBLE;\
35
+ break;\
36
+ case NA_SCOMPLEX:\
37
+ typ = MPI_2COMPLEX;\
38
+ break;\
39
+ case NA_DCOMPLEX:\
40
+ typ = MPI_2DOUBLE_COMPLEX;\
41
+ break;\
42
+ default:\
43
+ rb_raise(rb_eArgError, "narray type is invalid");\
44
+ }\
45
+ } else {\
46
+ rb_raise(rb_eArgError, "Only String and NArray are supported");\
47
+ }\
48
+ }
49
+
50
+ static VALUE mMPI;
51
+ static VALUE cComm, cRequest, cOp, cErrhandler, cStatus;
52
+
53
+ static VALUE eBUFFER, eCOUNT, eTYPE, eTAG, eCOMM, eRANK, eREQUEST, eROOT, eGROUP, eOP, eTOPOLOGY, eDIMS, eARG, eUNKNOWN, eTRUNCATE, eOTHER, eINTERN, eIN_STATUS, ePENDING, eACCESS, eAMODE, eASSERT, eBAD_FILE, eBASE, eCONVERSION, eDISP, eDUP_DATAREP, eFILE_EXISTS, eFILE_IN_USE, eFILE, eINFO_KEY, eINFO_NOKEY, eINFO_VALUE, eINFO, eIO, eKEYVAL, eLOCKTYPE, eNAME, eNO_MEM, eNOT_SAME, eNO_SPACE, eNO_SUCH_FILE, ePORT, eQUOTA, eREAD_ONLY, eRMA_CONFLICT, eRMA_SYNC, eSERVICE, eSIZE, eSPAWN, eUNSUPPORTED_DATAREP, eUNSUPPORTED_OPERATION, eWIN, eLASTCODE, eSYSRESOURCE;
54
+
55
+ struct _Comm {
56
+ MPI_Comm comm;
57
+ };
58
+ struct _Request {
59
+ MPI_Request request;
60
+ };
61
+ struct _Op {
62
+ MPI_Op op;
63
+ };
64
+ struct _Errhandler {
65
+ MPI_Errhandler errhandler;
66
+ };
67
+
68
+ static bool _initialized = false;
69
+ static bool _finalized = false;
70
+
71
+
72
+ #define CAE_ERR(type) case MPI_ERR_ ## type: rb_raise(e ## type,""); break
73
+ static void
74
+ check_error(int error)
75
+ {
76
+ switch (error) {
77
+ case MPI_SUCCESS: break;
78
+ CAE_ERR(BUFFER);
79
+ CAE_ERR(COUNT);
80
+ CAE_ERR(TYPE);
81
+ CAE_ERR(TAG);
82
+ CAE_ERR(COMM);
83
+ CAE_ERR(RANK);
84
+ CAE_ERR(REQUEST);
85
+ CAE_ERR(ROOT);
86
+ CAE_ERR(GROUP);
87
+ CAE_ERR(OP);
88
+ CAE_ERR(TOPOLOGY);
89
+ CAE_ERR(DIMS);
90
+ CAE_ERR(ARG);
91
+ CAE_ERR(UNKNOWN);
92
+ CAE_ERR(TRUNCATE);
93
+ CAE_ERR(OTHER);
94
+ CAE_ERR(INTERN);
95
+ CAE_ERR(IN_STATUS);
96
+ CAE_ERR(PENDING);
97
+ CAE_ERR(ACCESS);
98
+ CAE_ERR(AMODE);
99
+ CAE_ERR(ASSERT);
100
+ CAE_ERR(BAD_FILE);
101
+ CAE_ERR(BASE);
102
+ CAE_ERR(CONVERSION);
103
+ CAE_ERR(DISP);
104
+ CAE_ERR(DUP_DATAREP);
105
+ CAE_ERR(FILE_EXISTS);
106
+ CAE_ERR(FILE_IN_USE);
107
+ CAE_ERR(FILE);
108
+ CAE_ERR(INFO_KEY);
109
+ CAE_ERR(INFO_NOKEY);
110
+ CAE_ERR(INFO_VALUE);
111
+ CAE_ERR(INFO);
112
+ CAE_ERR(IO);
113
+ CAE_ERR(KEYVAL);
114
+ CAE_ERR(LOCKTYPE);
115
+ CAE_ERR(NAME);
116
+ CAE_ERR(NO_MEM);
117
+ CAE_ERR(NOT_SAME);
118
+ CAE_ERR(NO_SPACE);
119
+ CAE_ERR(NO_SUCH_FILE);
120
+ CAE_ERR(PORT);
121
+ CAE_ERR(QUOTA);
122
+ CAE_ERR(READ_ONLY);
123
+ CAE_ERR(RMA_CONFLICT);
124
+ CAE_ERR(RMA_SYNC);
125
+ CAE_ERR(SERVICE);
126
+ CAE_ERR(SIZE);
127
+ CAE_ERR(SPAWN);
128
+ CAE_ERR(UNSUPPORTED_DATAREP);
129
+ CAE_ERR(UNSUPPORTED_OPERATION);
130
+ CAE_ERR(WIN);
131
+ CAE_ERR(LASTCODE);
132
+ CAE_ERR(SYSRESOURCE);
133
+ default:
134
+ rb_raise(rb_eRuntimeError, "unknown error");
135
+ }
136
+ }
137
+
138
+ #define DEF_CONST(st, v, const, name, klass) \
139
+ {\
140
+ v = ALLOC(struct st);\
141
+ v->v = const;\
142
+ rb_define_const(klass, #name, Data_Wrap_Struct(klass, 0, -1, v)); \
143
+ }
144
+
145
+ static VALUE
146
+ rb_m_init(int argc, VALUE *argv, VALUE self)
147
+ {
148
+ VALUE argary;
149
+ int cargc;
150
+ char ** cargv;
151
+ VALUE progname;
152
+ int i;
153
+
154
+ if (_initialized)
155
+ return self;
156
+ else
157
+ _initialized = true;
158
+
159
+ rb_scan_args(argc, argv, "01", &argary);
160
+
161
+ if (NIL_P(argary)) {
162
+ argary = rb_const_get(rb_cObject, rb_intern("ARGV"));
163
+ cargc = RARRAY_LEN(argary);
164
+ } else {
165
+ Check_Type(argary, T_ARRAY);
166
+ cargc = RARRAY_LEN(argary);
167
+ }
168
+
169
+ cargv = ALLOCA_N(char *, cargc+1);
170
+ progname = rb_gv_get("$0");
171
+ cargv[0] = StringValueCStr(progname);
172
+
173
+ for(i=0; i<cargc; i++) {
174
+ if (TYPE(RARRAY_PTR(argary)[i]) == T_STRING)
175
+ cargv[i+1] = StringValueCStr(RARRAY_PTR(argary)[i]);
176
+ else
177
+ cargv[i+1] = (char*)"";
178
+ }
179
+ cargc++;
180
+
181
+ MPI_Init(&cargc, &cargv);
182
+
183
+ // define MPI::Comm::WORLD
184
+ struct _Comm *comm;
185
+ DEF_CONST(_Comm, comm, MPI_COMM_WORLD, WORLD, cComm);
186
+ MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
187
+
188
+ // define MPI::Op::???
189
+ struct _Op *op;
190
+ DEF_CONST(_Op, op, MPI_MAX, MAX, cOp);
191
+ DEF_CONST(_Op, op, MPI_MIN, MIN, cOp);
192
+ DEF_CONST(_Op, op, MPI_SUM, SUM, cOp);
193
+ DEF_CONST(_Op, op, MPI_PROD, PROD, cOp);
194
+ DEF_CONST(_Op, op, MPI_LAND, LAND, cOp);
195
+ DEF_CONST(_Op, op, MPI_BAND, BAND, cOp);
196
+ DEF_CONST(_Op, op, MPI_LOR, LOR, cOp);
197
+ DEF_CONST(_Op, op, MPI_BOR, BOR, cOp);
198
+ DEF_CONST(_Op, op, MPI_LXOR, LXOR, cOp);
199
+ DEF_CONST(_Op, op, MPI_BXOR, BXOR, cOp);
200
+ DEF_CONST(_Op, op, MPI_MAXLOC, MAXLOC, cOp);
201
+ DEF_CONST(_Op, op, MPI_MINLOC, MINLOC, cOp);
202
+ DEF_CONST(_Op, op, MPI_REPLACE, REPLACE, cOp);
203
+
204
+ // define MPI::Errhandler::ERRORS_ARE_FATAL, ERRORS_RETURN
205
+ struct _Errhandler *errhandler;
206
+ DEF_CONST(_Errhandler, errhandler, MPI_ERRORS_ARE_FATAL, ERRORS_ARE_FATAL, cErrhandler);
207
+ DEF_CONST(_Errhandler, errhandler, MPI_ERRORS_RETURN, ERRORS_RETURN, cErrhandler);
208
+
209
+ return self;
210
+ }
211
+
212
+ static void
213
+ _finalize()
214
+ {
215
+ if(_initialized && !_finalized) {
216
+ _finalized = true;
217
+ check_error(MPI_Finalize());
218
+ }
219
+ }
220
+ static VALUE
221
+ rb_m_finalize(VALUE self)
222
+ {
223
+ _finalize();
224
+ return self;
225
+ }
226
+
227
+
228
+ // MPI::Comm
229
+ static VALUE
230
+ rb_comm_alloc(VALUE klass)
231
+ {
232
+ struct _Comm *ptr = ALLOC(struct _Comm);
233
+ return Data_Wrap_Struct(klass, 0, -1, ptr);
234
+ }
235
+ static VALUE
236
+ rb_comm_initialize(VALUE self)
237
+ {
238
+ rb_raise(rb_eRuntimeError, "not developed yet");
239
+ // MPI_Comm_create()
240
+ }
241
+ static VALUE
242
+ rb_comm_size(VALUE self)
243
+ {
244
+ struct _Comm *comm;
245
+ int size;
246
+ Data_Get_Struct(self, struct _Comm, comm);
247
+ check_error(MPI_Comm_size(comm->comm, &size));
248
+ return INT2NUM(size);
249
+ }
250
+ static VALUE
251
+ rb_comm_rank(VALUE self)
252
+ {
253
+ struct _Comm *comm;
254
+ int rank;
255
+ Data_Get_Struct(self, struct _Comm, comm);
256
+ check_error(MPI_Comm_rank(comm->comm, &rank));
257
+ return INT2NUM(rank);
258
+ }
259
+ static VALUE
260
+ rb_comm_send(VALUE self, VALUE rb_obj, VALUE rb_dest, VALUE rb_tag)
261
+ {
262
+ void* buffer;
263
+ int len, dest, tag;
264
+ MPI_Datatype type;
265
+ struct _Comm *comm;
266
+
267
+ OBJ2C(rb_obj, len, buffer, type);
268
+ dest = NUM2INT(rb_dest);
269
+ tag = NUM2INT(rb_tag);
270
+ Data_Get_Struct(self, struct _Comm, comm);
271
+ check_error(MPI_Send(buffer, len, type, dest, tag, comm->comm));
272
+
273
+ return Qnil;
274
+ }
275
+ static VALUE
276
+ rb_comm_isend(VALUE self, VALUE rb_obj, VALUE rb_dest, VALUE rb_tag)
277
+ {
278
+ void* buffer;
279
+ int len, dest, tag;
280
+ MPI_Datatype type;
281
+ struct _Comm *comm;
282
+ struct _Request *request;
283
+ VALUE rb_request;
284
+
285
+ OBJ2C(rb_obj, len, buffer, type);
286
+ dest = NUM2INT(rb_dest);
287
+ tag = NUM2INT(rb_tag);
288
+ Data_Get_Struct(self, struct _Comm, comm);
289
+ rb_request = Data_Make_Struct(cRequest, struct _Request, 0, -1, request);
290
+ check_error(MPI_Isend(buffer, len, type, dest, tag, comm->comm, &(request->request)));
291
+
292
+ return rb_request;
293
+ }
294
+ static VALUE
295
+ rb_comm_recv(VALUE self, VALUE rb_obj, VALUE rb_source, VALUE rb_tag)
296
+ {
297
+ void* buffer;
298
+ int len, source, tag;
299
+ MPI_Datatype type;
300
+ MPI_Status *status;
301
+ struct _Comm *comm;
302
+
303
+ OBJ2C(rb_obj, len, buffer, type);
304
+ source = NUM2INT(rb_source);
305
+ tag = NUM2INT(rb_tag);
306
+
307
+ Data_Get_Struct(self, struct _Comm, comm);
308
+ status = ALLOC(MPI_Status);
309
+ check_error(MPI_Recv(buffer, len, type, source, tag, comm->comm, status));
310
+
311
+ return Data_Wrap_Struct(cStatus, 0, -1, status);
312
+ }
313
+ static VALUE
314
+ rb_comm_irecv(VALUE self, VALUE rb_obj, VALUE rb_source, VALUE rb_tag)
315
+ {
316
+ void* buffer;
317
+ int len, source, tag;
318
+ MPI_Datatype type;
319
+ struct _Comm *comm;
320
+ struct _Request *request;
321
+ VALUE rb_request;
322
+
323
+ OBJ2C(rb_obj, len, buffer, type);
324
+ source = NUM2INT(rb_source);
325
+ tag = NUM2INT(rb_tag);
326
+ Data_Get_Struct(self, struct _Comm, comm);
327
+ rb_request = Data_Make_Struct(cRequest, struct _Request, 0, -1, request);
328
+ check_error(MPI_Irecv(buffer, len, type, source, tag, comm->comm, &(request->request)));
329
+
330
+ return rb_request;
331
+ }
332
+ static VALUE
333
+ rb_comm_gather(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_root)
334
+ {
335
+ void *sendbuf, *recvbuf = NULL;
336
+ int sendcount, recvcount = 0;
337
+ MPI_Datatype sendtype, recvtype = NULL;
338
+ int root, rank, size;
339
+ struct _Comm *comm;
340
+ OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
341
+ root = NUM2INT(rb_root);
342
+ Data_Get_Struct(self, struct _Comm, comm);
343
+ check_error(MPI_Comm_rank(comm->comm, &rank));
344
+ check_error(MPI_Comm_size(comm->comm, &size));
345
+ if (rank == root) {
346
+ OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
347
+ if (recvcount < sendcount*size)
348
+ rb_raise(rb_eArgError, "recvbuf is too small");
349
+ recvcount = sendcount;
350
+ }
351
+ check_error(MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm->comm));
352
+ return Qnil;
353
+ }
354
+ static VALUE
355
+ rb_comm_allgather(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf)
356
+ {
357
+ void *sendbuf, *recvbuf;
358
+ int sendcount, recvcount;
359
+ MPI_Datatype sendtype, recvtype;
360
+ int rank, size;
361
+ struct _Comm *comm;
362
+ OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
363
+ Data_Get_Struct(self, struct _Comm, comm);
364
+ check_error(MPI_Comm_rank(comm->comm, &rank));
365
+ check_error(MPI_Comm_size(comm->comm, &size));
366
+ OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
367
+ if (recvcount < sendcount*size)
368
+ rb_raise(rb_eArgError, "recvbuf is too small");
369
+ recvcount = sendcount;
370
+ check_error(MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm->comm));
371
+ return Qnil;
372
+ }
373
+ static VALUE
374
+ rb_comm_bcast(VALUE self, VALUE rb_buffer, VALUE rb_root)
375
+ {
376
+ void *buffer;
377
+ int count;
378
+ MPI_Datatype type;
379
+ int root;
380
+ struct _Comm *comm;
381
+ OBJ2C(rb_buffer, count, buffer, type);
382
+ root = NUM2INT(rb_root);
383
+ Data_Get_Struct(self, struct _Comm, comm);
384
+ check_error(MPI_Bcast(buffer, count, type, root, comm->comm));
385
+ return Qnil;
386
+ }
387
+ static VALUE
388
+ rb_comm_scatter(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_root)
389
+ {
390
+ void *sendbuf = NULL, *recvbuf;
391
+ int sendcount = 0, recvcount;
392
+ MPI_Datatype sendtype = NULL, recvtype;
393
+ int root, rank, size;
394
+ struct _Comm *comm;
395
+ OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
396
+ root = NUM2INT(rb_root);
397
+ Data_Get_Struct(self, struct _Comm, comm);
398
+ check_error(MPI_Comm_rank(comm->comm, &rank));
399
+ check_error(MPI_Comm_size(comm->comm, &size));
400
+ if (rank == root) {
401
+ OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
402
+ if (sendcount > recvcount*size)
403
+ rb_raise(rb_eArgError, "recvbuf is too small");
404
+ sendcount = recvcount;
405
+ }
406
+ check_error(MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm->comm));
407
+ return Qnil;
408
+ }
409
+ static VALUE
410
+ rb_comm_alltoall(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf)
411
+ {
412
+ void *sendbuf, *recvbuf;
413
+ int sendcount, recvcount;
414
+ MPI_Datatype sendtype, recvtype;
415
+ int rank, size;
416
+ struct _Comm *comm;
417
+ OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
418
+ Data_Get_Struct(self, struct _Comm, comm);
419
+ check_error(MPI_Comm_rank(comm->comm, &rank));
420
+ check_error(MPI_Comm_size(comm->comm, &size));
421
+ OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
422
+ if (recvcount < sendcount)
423
+ rb_raise(rb_eArgError, "recvbuf is too small");
424
+ recvcount = recvcount/size;
425
+ sendcount = sendcount/size;
426
+ check_error(MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm->comm));
427
+ return Qnil;
428
+ }
429
+ static VALUE
430
+ rb_comm_reduce(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_op, VALUE rb_root)
431
+ {
432
+ void *sendbuf, *recvbuf = NULL;
433
+ int sendcount, recvcount = 0;
434
+ MPI_Datatype sendtype, recvtype = NULL;
435
+ int root, rank, size;
436
+ struct _Comm *comm;
437
+ struct _Op *op;
438
+ OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
439
+ root = NUM2INT(rb_root);
440
+ Data_Get_Struct(self, struct _Comm, comm);
441
+ check_error(MPI_Comm_rank(comm->comm, &rank));
442
+ check_error(MPI_Comm_size(comm->comm, &size));
443
+ if (rank == root) {
444
+ OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
445
+ if (recvcount != sendcount)
446
+ rb_raise(rb_eArgError, "sendbuf and recvbuf has the same length");
447
+ if (recvtype != sendtype)
448
+ rb_raise(rb_eArgError, "sendbuf and recvbuf has the same type");
449
+ }
450
+ Data_Get_Struct(rb_op, struct _Op, op);
451
+ check_error(MPI_Reduce(sendbuf, recvbuf, sendcount, sendtype, op->op, root, comm->comm));
452
+ return Qnil;
453
+ }
454
+ static VALUE
455
+ rb_comm_allreduce(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_op)
456
+ {
457
+ void *sendbuf, *recvbuf;
458
+ int sendcount, recvcount;
459
+ MPI_Datatype sendtype, recvtype;
460
+ int rank, size;
461
+ struct _Comm *comm;
462
+ struct _Op *op;
463
+ OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
464
+ Data_Get_Struct(self, struct _Comm, comm);
465
+ check_error(MPI_Comm_rank(comm->comm, &rank));
466
+ check_error(MPI_Comm_size(comm->comm, &size));
467
+ OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
468
+ if (recvcount != sendcount)
469
+ rb_raise(rb_eArgError, "sendbuf and recvbuf has the same length");
470
+ if (recvtype != sendtype)
471
+ rb_raise(rb_eArgError, "sendbuf and recvbuf has the same type");
472
+ Data_Get_Struct(rb_op, struct _Op, op);
473
+ check_error(MPI_Allreduce(sendbuf, recvbuf, recvcount, recvtype, op->op, comm->comm));
474
+ return Qnil;
475
+ }
476
+ static VALUE
477
+ rb_comm_get_Errhandler(VALUE self)
478
+ {
479
+ struct _Comm *comm;
480
+ struct _Errhandler *errhandler;
481
+ VALUE rb_errhandler;
482
+
483
+ Data_Get_Struct(self, struct _Comm, comm);
484
+ rb_errhandler = Data_Make_Struct(cErrhandler, struct _Errhandler, 0, -1, errhandler);
485
+ MPI_Comm_get_errhandler(comm->comm, &(errhandler->errhandler));
486
+ return rb_errhandler;
487
+ }
488
+ static VALUE
489
+ rb_comm_set_Errhandler(VALUE self, VALUE rb_errhandler)
490
+ {
491
+ struct _Comm *comm;
492
+ struct _Errhandler *errhandler;
493
+
494
+ Data_Get_Struct(self, struct _Comm, comm);
495
+ Data_Get_Struct(rb_errhandler, struct _Errhandler, errhandler);
496
+ MPI_Comm_set_errhandler(comm->comm, errhandler->errhandler);
497
+ return self;
498
+ }
499
+
500
+ // MPI::Request
501
+ static VALUE
502
+ rb_request_wait(VALUE self)
503
+ {
504
+ MPI_Status *status;
505
+ struct _Request *request;
506
+ Data_Get_Struct(self, struct _Request, request);
507
+ status = ALLOC(MPI_Status);
508
+ check_error(MPI_Wait(&(request->request), status));
509
+ return Data_Wrap_Struct(cStatus, 0, -1, status);
510
+ }
511
+
512
+ // MPI::Errhandler
513
+ static VALUE
514
+ rb_errhandler_eql(VALUE self, VALUE other)
515
+ {
516
+ struct _Errhandler *eh0, *eh1;
517
+ Data_Get_Struct(self, struct _Errhandler, eh0);
518
+ Data_Get_Struct(other, struct _Errhandler, eh1);
519
+ return eh0->errhandler == eh1->errhandler ? Qtrue : Qfalse;
520
+ }
521
+
522
+ // MPI::Status
523
+ static VALUE
524
+ rb_status_source(VALUE self)
525
+ {
526
+ MPI_Status *status;
527
+ Data_Get_Struct(self, MPI_Status, status);
528
+ return INT2NUM(status->MPI_SOURCE);
529
+ }
530
+ static VALUE
531
+ rb_status_tag(VALUE self)
532
+ {
533
+ MPI_Status *status;
534
+ Data_Get_Struct(self, MPI_Status, status);
535
+ return INT2NUM(status->MPI_TAG);
536
+ }
537
+ static VALUE
538
+ rb_status_error(VALUE self)
539
+ {
540
+ MPI_Status *status;
541
+ Data_Get_Struct(self, MPI_Status, status);
542
+ return INT2NUM(status->MPI_ERROR);
543
+ }
544
+
545
+
546
+ void Init_mpi()
547
+ {
548
+
549
+ rb_require("narray");
550
+
551
+ atexit(_finalize);
552
+
553
+ // MPI
554
+ mMPI = rb_define_module("MPI");
555
+ rb_define_module_function(mMPI, "Init", rb_m_init, -1);
556
+ rb_define_module_function(mMPI, "Finalize", rb_m_finalize, -1);
557
+ rb_define_const(mMPI, "VERSION", INT2NUM(MPI_VERSION));
558
+ rb_define_const(mMPI, "SUBVERSION", INT2NUM(MPI_SUBVERSION));
559
+ rb_define_const(mMPI, "SUCCESS", INT2NUM(MPI_SUCCESS));
560
+
561
+ // MPI::Comm
562
+ cComm = rb_define_class_under(mMPI, "Comm", rb_cObject);
563
+ // rb_define_alloc_func(cComm, rb_comm_alloc);
564
+ rb_define_private_method(cComm, "initialize", rb_comm_initialize, 0);
565
+ rb_define_method(cComm, "rank", rb_comm_rank, 0);
566
+ rb_define_method(cComm, "size", rb_comm_size, 0);
567
+ rb_define_method(cComm, "Send", rb_comm_send, 3);
568
+ rb_define_method(cComm, "Isend", rb_comm_isend, 3);
569
+ rb_define_method(cComm, "Recv", rb_comm_recv, 3);
570
+ rb_define_method(cComm, "Irecv", rb_comm_irecv, 3);
571
+ rb_define_method(cComm, "Gather", rb_comm_gather, 3);
572
+ rb_define_method(cComm, "Allgather", rb_comm_allgather, 2);
573
+ rb_define_method(cComm, "Bcast", rb_comm_bcast, 2);
574
+ rb_define_method(cComm, "Scatter", rb_comm_scatter, 3);
575
+ rb_define_method(cComm, "Alltoall", rb_comm_alltoall, 2);
576
+ rb_define_method(cComm, "Reduce", rb_comm_reduce, 4);
577
+ rb_define_method(cComm, "Allreduce", rb_comm_allreduce, 3);
578
+ rb_define_method(cComm, "Errhandler", rb_comm_get_Errhandler, 0);
579
+ rb_define_method(cComm, "Errhandler=", rb_comm_set_Errhandler, 1);
580
+
581
+ // MPI::Request
582
+ cRequest = rb_define_class_under(mMPI, "Request", rb_cObject);
583
+ rb_define_method(cRequest, "Wait", rb_request_wait, 0);
584
+
585
+ // MPI::Op
586
+ cOp = rb_define_class_under(mMPI, "Op", rb_cObject);
587
+
588
+ // MPI::Errhandler
589
+ cErrhandler = rb_define_class_under(mMPI, "Errhandler", rb_cObject);
590
+ rb_define_method(cErrhandler, "eql?", rb_errhandler_eql, 1);
591
+
592
+ // MPI::Status
593
+ cStatus = rb_define_class_under(mMPI, "Status", rb_cObject);
594
+ rb_define_method(cStatus, "source", rb_status_source, 0);
595
+ rb_define_method(cStatus, "tag", rb_status_tag, 0);
596
+ rb_define_method(cStatus, "error", rb_status_error, 0);
597
+
598
+
599
+ //MPI::ERR
600
+ VALUE mERR = rb_define_module_under(mMPI, "ERR");
601
+ eBUFFER = rb_define_class_under(mERR, "BUFFER", rb_eStandardError);
602
+ eCOUNT = rb_define_class_under(mERR, "COUNT", rb_eStandardError);
603
+ eTYPE = rb_define_class_under(mERR, "TYPE", rb_eStandardError);
604
+ eTAG = rb_define_class_under(mERR, "TAG", rb_eStandardError);
605
+ eCOMM = rb_define_class_under(mERR, "COMM", rb_eStandardError);
606
+ eRANK = rb_define_class_under(mERR, "RANK", rb_eStandardError);
607
+ eREQUEST = rb_define_class_under(mERR, "REQUEST", rb_eStandardError);
608
+ eROOT = rb_define_class_under(mERR, "ROOT", rb_eStandardError);
609
+ eGROUP = rb_define_class_under(mERR, "GROUP", rb_eStandardError);
610
+ eOP = rb_define_class_under(mERR, "OP", rb_eStandardError);
611
+ eTOPOLOGY = rb_define_class_under(mERR, "TOPOLOGY", rb_eStandardError);
612
+ eDIMS = rb_define_class_under(mERR, "DIMS", rb_eStandardError);
613
+ eARG = rb_define_class_under(mERR, "ARG", rb_eStandardError);
614
+ eUNKNOWN = rb_define_class_under(mERR, "UNKNOWN", rb_eStandardError);
615
+ eTRUNCATE = rb_define_class_under(mERR, "TRUNCATE", rb_eStandardError);
616
+ eOTHER = rb_define_class_under(mERR, "OTHER", rb_eStandardError);
617
+ eINTERN = rb_define_class_under(mERR, "INTERN", rb_eStandardError);
618
+ eIN_STATUS = rb_define_class_under(mERR, "IN_STATUS", rb_eStandardError);
619
+ ePENDING = rb_define_class_under(mERR, "PENDING", rb_eStandardError);
620
+ eACCESS = rb_define_class_under(mERR, "ACCESS", rb_eStandardError);
621
+ eAMODE = rb_define_class_under(mERR, "AMODE", rb_eStandardError);
622
+ eASSERT = rb_define_class_under(mERR, "ASSERT", rb_eStandardError);
623
+ eBAD_FILE = rb_define_class_under(mERR, "BAD_FILE", rb_eStandardError);
624
+ eBASE = rb_define_class_under(mERR, "BASE", rb_eStandardError);
625
+ eCONVERSION = rb_define_class_under(mERR, "CONVERSION", rb_eStandardError);
626
+ eDISP = rb_define_class_under(mERR, "DISP", rb_eStandardError);
627
+ eDUP_DATAREP = rb_define_class_under(mERR, "DUP_DATAREP", rb_eStandardError);
628
+ eFILE_EXISTS = rb_define_class_under(mERR, "FILE_EXISTS", rb_eStandardError);
629
+ eFILE_IN_USE = rb_define_class_under(mERR, "FILE_IN_USE", rb_eStandardError);
630
+ eFILE = rb_define_class_under(mERR, "FILE", rb_eStandardError);
631
+ eINFO_KEY = rb_define_class_under(mERR, "INFO_KEY", rb_eStandardError);
632
+ eINFO_NOKEY = rb_define_class_under(mERR, "INFO_NOKEY", rb_eStandardError);
633
+ eINFO_VALUE = rb_define_class_under(mERR, "INFO_VALUE", rb_eStandardError);
634
+ eINFO = rb_define_class_under(mERR, "INFO", rb_eStandardError);
635
+ eIO = rb_define_class_under(mERR, "IO", rb_eStandardError);
636
+ eKEYVAL = rb_define_class_under(mERR, "KEYVAL", rb_eStandardError);
637
+ eLOCKTYPE = rb_define_class_under(mERR, "LOCKTYPE", rb_eStandardError);
638
+ eNAME = rb_define_class_under(mERR, "NAME", rb_eStandardError);
639
+ eNO_MEM = rb_define_class_under(mERR, "NO_MEM", rb_eStandardError);
640
+ eNOT_SAME = rb_define_class_under(mERR, "NOT_SAME", rb_eStandardError);
641
+ eNO_SPACE = rb_define_class_under(mERR, "NO_SPACE", rb_eStandardError);
642
+ eNO_SUCH_FILE = rb_define_class_under(mERR, "NO_SUCH_FILE", rb_eStandardError);
643
+ ePORT = rb_define_class_under(mERR, "PORT", rb_eStandardError);
644
+ eQUOTA = rb_define_class_under(mERR, "QUOTA", rb_eStandardError);
645
+ eREAD_ONLY = rb_define_class_under(mERR, "READ_ONLY", rb_eStandardError);
646
+ eRMA_CONFLICT = rb_define_class_under(mERR, "RMA_CONFLICT", rb_eStandardError);
647
+ eRMA_SYNC = rb_define_class_under(mERR, "RMA_SYNC", rb_eStandardError);
648
+ eSERVICE = rb_define_class_under(mERR, "SERVICE", rb_eStandardError);
649
+ eSIZE = rb_define_class_under(mERR, "SIZE", rb_eStandardError);
650
+ eSPAWN = rb_define_class_under(mERR, "SPAWN", rb_eStandardError);
651
+ eUNSUPPORTED_DATAREP = rb_define_class_under(mERR, "UNSUPPORTED_DATAREP", rb_eStandardError);
652
+ eUNSUPPORTED_OPERATION = rb_define_class_under(mERR, "UNSUPPORTED_OPERATION", rb_eStandardError);
653
+ eWIN = rb_define_class_under(mERR, "WIN", rb_eStandardError);
654
+ eLASTCODE = rb_define_class_under(mERR, "LASTCODE", rb_eStandardError);
655
+ eSYSRESOURCE = rb_define_class_under(mERR, "SYSRESOURCE", rb_eStandardError);
656
+ }
data/lib/mpi.rb ADDED
@@ -0,0 +1,6 @@
+ begin
+   require "rubygems"
+ rescue LoadError
+ end
+ require "narray"
+ require "mpi.so"
data/ruby-mpi.gemspec ADDED
@@ -0,0 +1,73 @@
1
+ # Generated by jeweler
2
+ # DO NOT EDIT THIS FILE DIRECTLY
3
+ # Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
4
+ # -*- encoding: utf-8 -*-
5
+
6
+ Gem::Specification.new do |s|
7
+ s.name = %q{ruby-mpi}
8
+ s.version = "0.1.0"
9
+
10
+ s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
11
+ s.authors = ["Seiya Nishizawa"]
12
+ s.date = %q{2011-04-21}
13
+ s.description = %q{A ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages.}
14
+ s.email = %q{seiya@gfd-dennou.org}
15
+ s.extensions = ["ext/mpi/extconf.rb"]
16
+ s.extra_rdoc_files = [
17
+ "LICENSE.txt",
18
+ "README.rdoc"
19
+ ]
20
+ s.files = [
21
+ ".document",
22
+ ".rspec",
23
+ "Gemfile",
24
+ "Gemfile.lock",
25
+ "LICENSE.txt",
26
+ "README.rdoc",
27
+ "Rakefile",
28
+ "VERSION",
29
+ "ext/mpi/extconf.rb",
30
+ "ext/mpi/mpi.c",
31
+ "lib/mpi.rb",
32
+ "ruby-mpi.gemspec",
33
+ "samples/hello.rb",
34
+ "samples/narray.rb",
35
+ "spec/ruby-mpi_spec.rb",
36
+ "spec/spec_helper.rb"
37
+ ]
38
+ s.homepage = %q{http://github.com/seiya/ruby-mpi}
39
+ s.licenses = ["MIT"]
40
+ s.require_paths = ["lib"]
41
+ s.rubygems_version = %q{1.3.7}
42
+ s.summary = %q{A ruby binding of MPI}
43
+ s.test_files = [
44
+ "spec/ruby-mpi_spec.rb",
45
+ "spec/spec_helper.rb"
46
+ ]
47
+
48
+ if s.respond_to? :specification_version then
49
+ current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
50
+ s.specification_version = 3
51
+
52
+ if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
53
+ s.add_development_dependency(%q<rspec>, ["~> 2.3.0"])
54
+ s.add_development_dependency(%q<bundler>, ["~> 1.0.0"])
55
+ s.add_development_dependency(%q<jeweler>, ["~> 1.5.2"])
56
+ s.add_development_dependency(%q<rcov>, [">= 0"])
57
+ s.add_development_dependency(%q<narray>, [">= 0"])
58
+ else
59
+ s.add_dependency(%q<rspec>, ["~> 2.3.0"])
60
+ s.add_dependency(%q<bundler>, ["~> 1.0.0"])
61
+ s.add_dependency(%q<jeweler>, ["~> 1.5.2"])
62
+ s.add_dependency(%q<rcov>, [">= 0"])
63
+ s.add_dependency(%q<narray>, [">= 0"])
64
+ end
65
+ else
66
+ s.add_dependency(%q<rspec>, ["~> 2.3.0"])
67
+ s.add_dependency(%q<bundler>, ["~> 1.0.0"])
68
+ s.add_dependency(%q<jeweler>, ["~> 1.5.2"])
69
+ s.add_dependency(%q<rcov>, [">= 0"])
70
+ s.add_dependency(%q<narray>, [">= 0"])
71
+ end
72
+ end
73
+
data/samples/hello.rb ADDED
@@ -0,0 +1,21 @@
+ require "mpi"
+
+ MPI.Init
+
+
+ world = MPI::Comm::WORLD
+ rank = world.rank
+
+ if rank == 0
+   (world.size-1).times do |i|
+     str = "\x00"*100
+     world.Recv(str, i+1, 0)
+     p str
+   end
+ else
+   message = "Hello from #{rank}"
+   world.Send(message, 0, 0)
+ end
+
+
+ MPI.Finalize
data/samples/narray.rb ADDED
@@ -0,0 +1,20 @@
+ require "mpi"
+
+ MPI.Init
+
+
+ world = MPI::Comm::WORLD
+ rank = world.rank
+
+ if rank == 0
+   (world.size-1).times do |i|
+     a = NArray.float(2)
+     world.Recv(a, i+1, 1)
+     p a
+   end
+ else
+   world.Send(NArray[1.0,2], 0, 1)
+ end
+
+
+ MPI.Finalize
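Besides the point-to-point Send/Recv shown in the samples above, the extension also wraps collective operations (Gather, Bcast, Scatter, Alltoall, Reduce, Allreduce). The following is a minimal sketch of an Allreduce over NArray buffers, mirroring the usage exercised in spec/ruby-mpi_spec.rb; the buffer length and the SUM op are illustrative choices:

  require "mpi"

  MPI.Init

  world = MPI::Comm::WORLD
  rank  = world.rank

  # Each rank contributes an NArray; recvbuf must have the same type and length.
  sendbuf = NArray.to_na([rank, rank])
  recvbuf = NArray.new(sendbuf.typecode, sendbuf.total)

  # Element-wise sum across all ranks; every rank ends up with the same result.
  world.Allreduce(sendbuf, recvbuf, MPI::Op::SUM)
  p recvbuf if rank == 0   # e.g. with 4 processes: NArray[6, 6]

  MPI.Finalize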
data/spec/ruby-mpi_spec.rb ADDED
@@ -0,0 +1,192 @@
1
+ require File.expand_path(File.dirname(__FILE__) + '/spec_helper')
2
+
3
+ describe "MPI" do
4
+ before(:all) do
5
+ MPI.Init()
6
+ end
7
+ after(:all) do
8
+ MPI.Finalize()
9
+ end
10
+
11
+ it "should give version" do
12
+ MPI.constants.should include("VERSION")
13
+ MPI.constants.should include("SUBVERSION")
14
+ MPI::VERSION.class.should eql(Fixnum)
15
+ MPI::SUBVERSION.class.should eql(Fixnum)
16
+ end
17
+
18
+ it "should have Comm:WORLD" do
19
+ MPI::Comm.constants.should include("WORLD")
20
+ world = MPI::Comm::WORLD
21
+ world.rank.class.should eql(Fixnum)
22
+ world.size.class.should eql(Fixnum)
23
+ world.size.should > 0
24
+ end
25
+
26
+ it "should send and receive String" do
27
+ world = MPI::Comm::WORLD
28
+ message = "Hello from #{world.rank}"
29
+ tag = 0
30
+ world.Send(message, 0, tag)
31
+ if world.rank == 0
32
+ world.size.times do |i|
33
+ str = " "*30
34
+ status = world.Recv(str, i, tag)
35
+ status.source.should eql(i)
36
+ status.tag.should eql(tag)
37
+ status.error.should eq(MPI::SUCCESS)
38
+ str.should match(/\AHello from #{i}+/)
39
+ end
40
+ end
41
+ end
42
+
43
+ it "should send and receive NArray" do
44
+ world = MPI::Comm::WORLD
45
+ tag = 0
46
+ [NArray[1,2,3], NArray[3.0,2.0,1.0]].each do |ary0|
48
+ world.Send(ary0, 0, tag)
49
+ if world.rank == 0
50
+ world.size.times do |i|
51
+ ary1 = NArray.new(ary0.typecode, ary0.total)
52
+ status = world.Recv(ary1, i, tag)
53
+ status.source.should eql(i)
54
+ status.tag.should eql(tag)
55
+ status.error.should eq(MPI::SUCCESS)
56
+ ary1.should == ary0
57
+ end
58
+ end
59
+ end
60
+ end
61
+
62
+ it "should send and receive without blocking" do
63
+ world = MPI::Comm::WORLD
64
+ message = "Hello from #{world.rank}"
65
+ tag = 0
66
+ request = world.Isend(message, 0, tag)
67
+ status = request.Wait
68
+ status.source.should eql(world.rank)
69
+ status.tag.should eql(tag)
70
+ if world.rank == 0
71
+ world.size.times do |i|
72
+ str = " "*30
73
+ request = world.Irecv(str, i, tag)
74
+ status = request.Wait
75
+ status.source.should eql(i)
76
+ status.tag.should eql(tag)
77
+ str.should match(/\AHello from #{i}+/)
78
+ end
79
+ end
80
+ end
81
+
82
+ it "should gather data" do
83
+ world = MPI::Comm::WORLD
84
+ rank = world.rank
85
+ size = world.size
86
+ root = 0
87
+ bufsize = 2
88
+ sendbuf = rank.to_s*bufsize
89
+ recvbuf = rank == root ? "?"*bufsize*size : nil
90
+ world.Gather(sendbuf, recvbuf, root)
91
+ if rank == root
92
+ str = ""
93
+ size.times{|i| str << i.to_s*bufsize}
94
+ recvbuf.should eql(str)
95
+ end
96
+ end
97
+
98
+ it "should gather data to all processes (allgather)" do
99
+ world = MPI::Comm::WORLD
100
+ rank = world.rank
101
+ size = world.size
102
+ bufsize = 2
103
+ sendbuf = rank.to_s*bufsize
104
+ recvbuf = "?"*bufsize*size
105
+ world.Allgather(sendbuf, recvbuf)
106
+ str = ""
107
+ size.times{|i| str << i.to_s*bufsize}
108
+ recvbuf.should eql(str)
109
+ end
110
+
111
+ it "should broad cast data (bcast)" do
112
+ world = MPI::Comm::WORLD
113
+ rank = world.rank
114
+ root = 0
115
+ bufsize = 2
116
+ if rank == root
117
+ buffer = rank.to_s*bufsize
118
+ else
119
+ buffer = " "*bufsize
120
+ end
121
+ world.Bcast(buffer, root)
122
+ buffer.should eql(root.to_s*bufsize)
123
+ end
124
+
125
+ it "should scatter data" do
126
+ world = MPI::Comm::WORLD
127
+ rank = world.rank
128
+ size = world.size
129
+ root = 0
130
+ bufsize = 2
131
+ if rank == root
132
+ sendbuf = ""
133
+ size.times{|i| sendbuf << i.to_s*bufsize}
134
+ else
135
+ sendbuf = nil
136
+ end
137
+ recvbuf = " "*bufsize
138
+ world.Scatter(sendbuf, recvbuf, root)
139
+ recvbuf.should eql(rank.to_s*bufsize)
140
+ end
141
+
142
+ it "should change data between each others (alltoall)" do
143
+ world = MPI::Comm::WORLD
144
+ rank = world.rank
145
+ size = world.size
146
+ bufsize = 2
147
+ sendbuf = rank.to_s*bufsize*size
148
+ recvbuf = "?"*bufsize*size
149
+ world.Alltoall(sendbuf, recvbuf)
150
+ str = ""
151
+ size.times{|i| str << i.to_s*bufsize}
152
+ recvbuf.should eql(str)
153
+ end
154
+
155
+ it "should reduce data" do
156
+ world = MPI::Comm::WORLD
157
+ rank = world.rank
158
+ size = world.size
159
+ root = 0
160
+ bufsize = 2
161
+ sendbuf = NArray.to_na([rank]*bufsize)
162
+ recvbuf = rank == root ? NArray.new(sendbuf.typecode,bufsize) : nil
163
+ world.Reduce(sendbuf, recvbuf, MPI::Op::SUM, root)
164
+ if rank == root
165
+ ary = NArray.new(sendbuf.typecode,bufsize).fill(size*(size-1)/2.0)
166
+ recvbuf.should == ary
167
+ end
168
+ end
169
+
170
+ it "should reduce data and send to all processes (allreduce)" do
171
+ world = MPI::Comm::WORLD
172
+ rank = world.rank
173
+ size = world.size
174
+ bufsize = 2
175
+ sendbuf = NArray.to_na([rank]*bufsize)
176
+ recvbuf = NArray.new(sendbuf.typecode,bufsize)
177
+ world.Allreduce(sendbuf, recvbuf, MPI::Op::SUM)
178
+ ary = NArray.new(sendbuf.typecode,bufsize).fill(size*(size-1)/2.0)
179
+ recvbuf.should == ary
180
+ end
181
+
182
+
183
+
184
+
185
+ it "shoud raise exeption" do
186
+ world = MPI::Comm::WORLD
187
+ lambda{ world.Send("", -1, 0) }.should raise_error(MPI::ERR::RANK)
188
+ lambda{ world.Send("", world.size+1, 0) }.should raise_error(MPI::ERR::RANK)
189
+ world.Errhandler.should eql(MPI::Errhandler::ERRORS_RETURN)
190
+ end
191
+
192
+ end
data/spec/spec_helper.rb ADDED
@@ -0,0 +1,13 @@
+ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'ext', 'mpi'))
+ $LOAD_PATH.unshift(File.join(File.dirname(__FILE__), '..', 'lib'))
+ $LOAD_PATH.unshift(File.dirname(__FILE__))
+ require 'rspec'
+ require 'mpi'
+
+ # Requires supporting files with custom matchers and macros, etc,
+ # in ./support/ and its subdirectories.
+ Dir["#{File.dirname(__FILE__)}/support/**/*.rb"].each {|f| require f}
+
+ RSpec.configure do |config|
+
+ end
metadata ADDED
@@ -0,0 +1,159 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: ruby-mpi
3
+ version: !ruby/object:Gem::Version
4
+ hash: 27
5
+ prerelease: false
6
+ segments:
7
+ - 0
8
+ - 1
9
+ - 0
10
+ version: 0.1.0
11
+ platform: ruby
12
+ authors:
13
+ - Seiya Nishizawa
14
+ autorequire:
15
+ bindir: bin
16
+ cert_chain: []
17
+
18
+ date: 2011-04-21 00:00:00 +09:00
19
+ default_executable:
20
+ dependencies:
21
+ - !ruby/object:Gem::Dependency
22
+ prerelease: false
23
+ name: rspec
24
+ version_requirements: &id001 !ruby/object:Gem::Requirement
25
+ none: false
26
+ requirements:
27
+ - - ~>
28
+ - !ruby/object:Gem::Version
29
+ hash: 3
30
+ segments:
31
+ - 2
32
+ - 3
33
+ - 0
34
+ version: 2.3.0
35
+ requirement: *id001
36
+ type: :development
37
+ - !ruby/object:Gem::Dependency
38
+ prerelease: false
39
+ name: bundler
40
+ version_requirements: &id002 !ruby/object:Gem::Requirement
41
+ none: false
42
+ requirements:
43
+ - - ~>
44
+ - !ruby/object:Gem::Version
45
+ hash: 23
46
+ segments:
47
+ - 1
48
+ - 0
49
+ - 0
50
+ version: 1.0.0
51
+ requirement: *id002
52
+ type: :development
53
+ - !ruby/object:Gem::Dependency
54
+ prerelease: false
55
+ name: jeweler
56
+ version_requirements: &id003 !ruby/object:Gem::Requirement
57
+ none: false
58
+ requirements:
59
+ - - ~>
60
+ - !ruby/object:Gem::Version
61
+ hash: 7
62
+ segments:
63
+ - 1
64
+ - 5
65
+ - 2
66
+ version: 1.5.2
67
+ requirement: *id003
68
+ type: :development
69
+ - !ruby/object:Gem::Dependency
70
+ prerelease: false
71
+ name: rcov
72
+ version_requirements: &id004 !ruby/object:Gem::Requirement
73
+ none: false
74
+ requirements:
75
+ - - ">="
76
+ - !ruby/object:Gem::Version
77
+ hash: 3
78
+ segments:
79
+ - 0
80
+ version: "0"
81
+ requirement: *id004
82
+ type: :development
83
+ - !ruby/object:Gem::Dependency
84
+ prerelease: false
85
+ name: narray
86
+ version_requirements: &id005 !ruby/object:Gem::Requirement
87
+ none: false
88
+ requirements:
89
+ - - ">="
90
+ - !ruby/object:Gem::Version
91
+ hash: 3
92
+ segments:
93
+ - 0
94
+ version: "0"
95
+ requirement: *id005
96
+ type: :development
97
+ description: A ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages.
98
+ email: seiya@gfd-dennou.org
99
+ executables: []
100
+
101
+ extensions:
102
+ - ext/mpi/extconf.rb
103
+ extra_rdoc_files:
104
+ - LICENSE.txt
105
+ - README.rdoc
106
+ files:
107
+ - .document
108
+ - .rspec
109
+ - Gemfile
110
+ - Gemfile.lock
111
+ - LICENSE.txt
112
+ - README.rdoc
113
+ - Rakefile
114
+ - VERSION
115
+ - ext/mpi/extconf.rb
116
+ - ext/mpi/mpi.c
117
+ - lib/mpi.rb
118
+ - ruby-mpi.gemspec
119
+ - samples/hello.rb
120
+ - samples/narray.rb
121
+ - spec/ruby-mpi_spec.rb
122
+ - spec/spec_helper.rb
123
+ has_rdoc: true
124
+ homepage: http://github.com/seiya/ruby-mpi
125
+ licenses:
126
+ - MIT
127
+ post_install_message:
128
+ rdoc_options: []
129
+
130
+ require_paths:
131
+ - lib
132
+ required_ruby_version: !ruby/object:Gem::Requirement
133
+ none: false
134
+ requirements:
135
+ - - ">="
136
+ - !ruby/object:Gem::Version
137
+ hash: 3
138
+ segments:
139
+ - 0
140
+ version: "0"
141
+ required_rubygems_version: !ruby/object:Gem::Requirement
142
+ none: false
143
+ requirements:
144
+ - - ">="
145
+ - !ruby/object:Gem::Version
146
+ hash: 3
147
+ segments:
148
+ - 0
149
+ version: "0"
150
+ requirements: []
151
+
152
+ rubyforge_project:
153
+ rubygems_version: 1.3.7
154
+ signing_key:
155
+ specification_version: 3
156
+ summary: A ruby binding of MPI
157
+ test_files:
158
+ - spec/ruby-mpi_spec.rb
159
+ - spec/spec_helper.rb