ruby-mpi 0.1.0 → 0.2.0
- data/README.rdoc +7 -3
- data/VERSION +1 -1
- data/ext/mpi/mpi.c +134 -78
- data/ruby-mpi.gemspec +3 -4
- data/spec/ruby-mpi_spec.rb +83 -68
- metadata +20 -51
data/README.rdoc
CHANGED
@@ -1,8 +1,12 @@
-=
+= Ruby-MPI
 
-
+Ruby-MPI is a ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages.
 
-==
+== Install
+
+ # gem install ruby-mpi
+
+== Contributing to Ruby-MPI
 
 * Check out the latest master to make sure the feature hasn't been implemented or the bug hasn't been fixed yet
 * Check out the issue tracker to make sure someone already hasn't requested it and/or contributed it
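
For orientation, here is a minimal usage sketch of the API this release's specs exercise (MPI.Init/MPI.Finalize, MPI::Comm::WORLD, Send/Recv into a pre-allocated buffer). The file name hello.rb and the launch command are illustrative assumptions, not part of this diff:

  # hello.rb -- launched with something like: mpirun -np 2 ruby hello.rb
  require "mpi"

  MPI.Init
  world = MPI::Comm::WORLD
  rank = world.rank
  if rank != 0
    world.Send("Hello from #{rank}", 0, 0)        # (obj, dest, tag)
  else
    (world.size - 1).times do |i|
      str = " " * "Hello from #{i+1}".length      # Recv fills a pre-allocated String
      status = world.Recv(str, i + 1, 0)          # (obj, source, tag)
      puts "#{str} (source=#{status.source})"
    end
  end
  MPI.Finalize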
data/VERSION
CHANGED
@@ -1 +1 @@
-0.1.0
+0.2.0
data/ext/mpi/mpi.c
CHANGED
@@ -53,22 +53,43 @@ static VALUE cComm, cRequest, cOp, cErrhandler, cStatus;
 static VALUE eBUFFER, eCOUNT, eTYPE, eTAG, eCOMM, eRANK, eREQUEST, eROOT, eGROUP, eOP, eTOPOLOGY, eDIMS, eARG, eUNKNOWN, eTRUNCATE, eOTHER, eINTERN, eIN_STATUS, ePENDING, eACCESS, eAMODE, eASSERT, eBAD_FILE, eBASE, eCONVERSION, eDISP, eDUP_DATAREP, eFILE_EXISTS, eFILE_IN_USE, eFILE, eINFO_KEY, eINFO_NOKEY, eINFO_VALUE, eINFO, eIO, eKEYVAL, eLOCKTYPE, eNAME, eNO_MEM, eNOT_SAME, eNO_SPACE, eNO_SUCH_FILE, ePORT, eQUOTA, eREAD_ONLY, eRMA_CONFLICT, eRMA_SYNC, eSERVICE, eSIZE, eSPAWN, eUNSUPPORTED_DATAREP, eUNSUPPORTED_OPERATION, eWIN, eLASTCODE, eSYSRESOURCE;
 
 struct _Comm {
-  MPI_Comm comm;
+  MPI_Comm Comm;
 };
 struct _Request {
-  MPI_Request request;
+  MPI_Request Request;
 };
 struct _Op {
-  MPI_Op op;
+  MPI_Op Op;
 };
 struct _Errhandler {
-  MPI_Errhandler errhandler;
+  MPI_Errhandler Errhandler;
 };
 
 static bool _initialized = false;
 static bool _finalized = false;
 
 
+#define DEF_FREE(name) \
+static void \
+name ## _free(void *ptr)\
+{\
+  struct _ ## name *obj;\
+  obj = (struct _ ## name*) ptr;\
+  if (!_finalized)\
+    MPI_ ## name ## _free(&(obj->name)); \
+  free(obj);\
+}
+DEF_FREE(Comm)
+DEF_FREE(Request)
+DEF_FREE(Op)
+DEF_FREE(Errhandler)
+static void
+Status_free(void *ptr)
+{
+  free((MPI_Status*) ptr);
+}
+
+
 #define CAE_ERR(type) case MPI_ERR_ ## type: rb_raise(e ## type,""); break
 static void
 check_error(int error)
@@ -129,19 +150,29 @@ check_error(int error)
     CAE_ERR(UNSUPPORTED_OPERATION);
     CAE_ERR(WIN);
     CAE_ERR(LASTCODE);
+#ifdef MPI_ERR_SYSRESOURCE
     CAE_ERR(SYSRESOURCE);
+#endif
   default:
     rb_raise(rb_eRuntimeError, "unknown error");
   }
 }
 
-#define DEF_CONST(
+#define DEF_CONST(v, const, name) \
 {\
-  v = ALLOC(struct
+  v = ALLOC(struct _ ## v);\
   v->v = const;\
-  rb_define_const(
+  rb_define_const(c ## v, #name, Data_Wrap_Struct(c ## v, NULL, v ## _free, v)); \
 }
 
+static void
+_finalize()
+{
+  if(_initialized && !_finalized) {
+    _finalized = true;
+    check_error(MPI_Finalize());
+  }
+}
 static VALUE
 rb_m_init(int argc, VALUE *argv, VALUE self)
 {
@@ -151,11 +182,6 @@ rb_m_init(int argc, VALUE *argv, VALUE self)
   VALUE progname;
   int i;
 
-  if (_initialized)
-    return self;
-  else
-    _initialized = true;
-
   rb_scan_args(argc, argv, "01", &argary);
 
   if (NIL_P(argary)) {
@@ -178,45 +204,44 @@ rb_m_init(int argc, VALUE *argv, VALUE self)
   }
   cargc++;
 
-  MPI_Init(&cargc, &cargv);
+  check_error(MPI_Init(&cargc, &cargv));
+  if (_initialized)
+    return self;
+  else
+    _initialized = true;
+  atexit(_finalize);
+
+
 
   // define MPI::Comm::WORLD
-  struct _Comm *
-  DEF_CONST(
+  struct _Comm *Comm;
+  DEF_CONST(Comm, MPI_COMM_WORLD, WORLD);
   MPI_Errhandler_set(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
 
   // define MPI::Op::???
-  struct _Op *
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
-  DEF_CONST(
+  struct _Op *Op;
+  DEF_CONST(Op, MPI_MAX, MAX);
+  DEF_CONST(Op, MPI_MIN, MIN);
+  DEF_CONST(Op, MPI_SUM, SUM);
+  DEF_CONST(Op, MPI_PROD, PROD);
+  DEF_CONST(Op, MPI_LAND, LAND);
+  DEF_CONST(Op, MPI_BAND, BAND);
+  DEF_CONST(Op, MPI_LOR, LOR);
+  DEF_CONST(Op, MPI_BOR, BOR);
+  DEF_CONST(Op, MPI_LXOR, LXOR);
+  DEF_CONST(Op, MPI_BXOR, BXOR);
+  DEF_CONST(Op, MPI_MAXLOC, MAXLOC);
+  DEF_CONST(Op, MPI_MINLOC, MINLOC);
+  DEF_CONST(Op, MPI_REPLACE, REPLACE);
 
   // define MPI::Errhandler::ERRORS_ARE_FATAL, ERRORS_RETURN
-  struct _Errhandler *
-  DEF_CONST(
-  DEF_CONST(
+  struct _Errhandler *Errhandler;
+  DEF_CONST(Errhandler, MPI_ERRORS_ARE_FATAL, ERRORS_ARE_FATAL);
+  DEF_CONST(Errhandler, MPI_ERRORS_RETURN, ERRORS_RETURN);
 
   return self;
 }
 
-static void
-_finalize()
-{
-  if(_initialized && !_finalized) {
-    _finalized = true;
-    check_error(MPI_Finalize());
-  }
-}
 static VALUE
 rb_m_finalize(VALUE self)
 {
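
Note on the hunk above: MPI_Init is now wrapped in check_error, the _initialized guard runs after the call instead of before it, and _finalize is registered with atexit here rather than in Init_mpi (its old registration is removed further down). Read that way, a script that never calls MPI.Finalize explicitly is still finalized once at process exit; a sketch under that assumption:

  MPI.Init
  puts MPI::Comm::WORLD.rank
  # No explicit MPI.Finalize: the atexit-registered _finalize calls
  # MPI_Finalize exactly once, guarded by _initialized/_finalized.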
@@ -230,7 +255,7 @@ static VALUE
 rb_comm_alloc(VALUE klass)
 {
   struct _Comm *ptr = ALLOC(struct _Comm);
-  return Data_Wrap_Struct(klass,
+  return Data_Wrap_Struct(klass, NULL, Comm_free, ptr);
 }
 static VALUE
 rb_comm_initialize(VALUE self)
@@ -244,7 +269,7 @@ rb_comm_size(VALUE self)
   struct _Comm *comm;
   int size;
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(MPI_Comm_size(comm->comm, &size));
+  check_error(MPI_Comm_size(comm->Comm, &size));
   return INT2NUM(size);
 }
 static VALUE
@@ -253,7 +278,7 @@ rb_comm_rank(VALUE self)
   struct _Comm *comm;
   int rank;
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(MPI_Comm_rank(comm->comm, &rank));
+  check_error(MPI_Comm_rank(comm->Comm, &rank));
   return INT2NUM(rank);
 }
 static VALUE
@@ -268,7 +293,7 @@ rb_comm_send(VALUE self, VALUE rb_obj, VALUE rb_dest, VALUE rb_tag)
   dest = NUM2INT(rb_dest);
   tag = NUM2INT(rb_tag);
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(MPI_Send(buffer, len, type, dest, tag, comm->comm));
+  check_error(MPI_Send(buffer, len, type, dest, tag, comm->Comm));
 
   return Qnil;
 }
@@ -286,8 +311,8 @@ rb_comm_isend(VALUE self, VALUE rb_obj, VALUE rb_dest, VALUE rb_tag)
   dest = NUM2INT(rb_dest);
   tag = NUM2INT(rb_tag);
   Data_Get_Struct(self, struct _Comm, comm);
-  rb_request = Data_Make_Struct(cRequest, struct _Request,
-  check_error(MPI_Isend(buffer, len, type, dest, tag, comm->comm, &(request->request)));
+  rb_request = Data_Make_Struct(cRequest, struct _Request, NULL, Request_free, request);
+  check_error(MPI_Isend(buffer, len, type, dest, tag, comm->Comm, &(request->Request)));
 
   return rb_request;
 }
@@ -306,9 +331,9 @@ rb_comm_recv(VALUE self, VALUE rb_obj, VALUE rb_source, VALUE rb_tag)
 
   Data_Get_Struct(self, struct _Comm, comm);
   status = ALLOC(MPI_Status);
-  check_error(MPI_Recv(buffer, len, type, source, tag, comm->comm, status));
+  check_error(MPI_Recv(buffer, len, type, source, tag, comm->Comm, status));
 
-  return Data_Wrap_Struct(cStatus,
+  return Data_Wrap_Struct(cStatus, NULL, Status_free, status);
 }
 static VALUE
 rb_comm_irecv(VALUE self, VALUE rb_obj, VALUE rb_source, VALUE rb_tag)
@@ -324,8 +349,8 @@ rb_comm_irecv(VALUE self, VALUE rb_obj, VALUE rb_source, VALUE rb_tag)
   source = NUM2INT(rb_source);
   tag = NUM2INT(rb_tag);
   Data_Get_Struct(self, struct _Comm, comm);
-  rb_request = Data_Make_Struct(cRequest, struct _Request,
-  check_error(MPI_Irecv(buffer, len, type, source, tag, comm->comm, &(request->request)));
+  rb_request = Data_Make_Struct(cRequest, struct _Request, NULL, Request_free, request);
+  check_error(MPI_Irecv(buffer, len, type, source, tag, comm->Comm, &(request->Request)));
 
   return rb_request;
 }
@@ -340,15 +365,15 @@ rb_comm_gather(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_root)
   OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
   root = NUM2INT(rb_root);
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(MPI_Comm_rank(comm->comm, &rank));
-  check_error(MPI_Comm_size(comm->comm, &size));
+  check_error(MPI_Comm_rank(comm->Comm, &rank));
+  check_error(MPI_Comm_size(comm->Comm, &size));
   if (rank == root) {
     OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
     if (recvcount < sendcount*size)
       rb_raise(rb_eArgError, "recvbuf is too small");
     recvcount = sendcount;
   }
-  check_error(MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm->comm));
+  check_error(MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm->Comm));
   return Qnil;
 }
 static VALUE
@@ -361,13 +386,13 @@ rb_comm_allgather(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf)
   struct _Comm *comm;
   OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(MPI_Comm_rank(comm->comm, &rank));
-  check_error(MPI_Comm_size(comm->comm, &size));
+  check_error(MPI_Comm_rank(comm->Comm, &rank));
+  check_error(MPI_Comm_size(comm->Comm, &size));
   OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
   if (recvcount < sendcount*size)
     rb_raise(rb_eArgError, "recvbuf is too small");
   recvcount = sendcount;
-  check_error(MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm->comm));
+  check_error(MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm->Comm));
   return Qnil;
 }
 static VALUE
@@ -381,7 +406,7 @@ rb_comm_bcast(VALUE self, VALUE rb_buffer, VALUE rb_root)
   OBJ2C(rb_buffer, count, buffer, type);
   root = NUM2INT(rb_root);
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(MPI_Bcast(buffer, count, type, root, comm->comm));
+  check_error(MPI_Bcast(buffer, count, type, root, comm->Comm));
   return Qnil;
 }
 static VALUE
@@ -395,35 +420,57 @@ rb_comm_scatter(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_root)
   OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
   root = NUM2INT(rb_root);
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(MPI_Comm_rank(comm->comm, &rank));
-  check_error(MPI_Comm_size(comm->comm, &size));
+  check_error(MPI_Comm_rank(comm->Comm, &rank));
+  check_error(MPI_Comm_size(comm->Comm, &size));
   if (rank == root) {
     OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
     if (sendcount > recvcount*size)
       rb_raise(rb_eArgError, "recvbuf is too small");
     sendcount = recvcount;
   }
-  check_error(MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm->comm));
+  check_error(MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm->Comm));
   return Qnil;
 }
 static VALUE
+rb_comm_sendrecv(VALUE self, VALUE rb_sendbuf, VALUE rb_dest, VALUE rb_sendtag, VALUE rb_recvbuf, VALUE rb_source, VALUE rb_recvtag)
+{
+  void *sendbuf, *recvbuf;
+  int sendcount, recvcount;
+  MPI_Datatype sendtype, recvtype;
+  int dest, source;
+  int sendtag, recvtag;
+  int size;
+  struct _Comm *comm;
+  MPI_Status *status;
+  OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
+  Data_Get_Struct(self, struct _Comm, comm);
+  check_error(MPI_Comm_size(comm->Comm, &size));
+  OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
+  dest = NUM2INT(rb_dest);
+  source = NUM2INT(rb_source);
+  sendtag = NUM2INT(rb_sendtag);
+  recvtag = NUM2INT(rb_recvtag);
+  status = ALLOC(MPI_Status);
+  check_error(MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, recvcount, recvtype, source, recvtag, comm->Comm, status));
+  return Data_Wrap_Struct(cStatus, NULL, Status_free, status);
+}
+static VALUE
 rb_comm_alltoall(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf)
 {
   void *sendbuf, *recvbuf;
   int sendcount, recvcount;
   MPI_Datatype sendtype, recvtype;
-  int
+  int size;
   struct _Comm *comm;
   OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(
-  check_error(MPI_Comm_size(comm->comm, &size));
+  check_error(MPI_Comm_size(comm->Comm, &size));
   OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
   if (recvcount < sendcount)
     rb_raise(rb_eArgError, "recvbuf is too small");
   recvcount = recvcount/size;
   sendcount = sendcount/size;
-  check_error(MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm->comm));
+  check_error(MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm->Comm));
   return Qnil;
 }
 static VALUE
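
The new rb_comm_sendrecv above is a thin wrapper over MPI_Sendrecv, exposed as Comm#Sendrecv with six arguments (see the rb_define_method hunk below). A condensed Ruby sketch of the ring exchange the updated spec drives through it; the 2-character buffer is an arbitrary choice:

  world = MPI::Comm::WORLD
  rank, size = world.rank, world.size
  dest    = (rank - 1) % size   # left neighbor
  source  = (rank + 1) % size   # right neighbor
  recvbuf = " " * 2
  # Argument order: sendbuf, dest, sendtag, recvbuf, source, recvtag.
  world.Sendrecv(rank.to_s * 2, dest, rank, recvbuf, source, source)
  # recvbuf now holds the right neighbor's payload, e.g. "11" on rank 0 of 2.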
@@ -438,8 +485,8 @@ rb_comm_reduce(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_op, VALU
   OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
   root = NUM2INT(rb_root);
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(MPI_Comm_rank(comm->comm, &rank));
-  check_error(MPI_Comm_size(comm->comm, &size));
+  check_error(MPI_Comm_rank(comm->Comm, &rank));
+  check_error(MPI_Comm_size(comm->Comm, &size));
   if (rank == root) {
     OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
     if (recvcount != sendcount)
@@ -448,7 +495,7 @@ rb_comm_reduce(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_op, VALU
     rb_raise(rb_eArgError, "sendbuf and recvbuf has the same type");
   }
   Data_Get_Struct(rb_op, struct _Op, op);
-  check_error(MPI_Reduce(sendbuf, recvbuf, sendcount, sendtype, op->op, root, comm->comm));
+  check_error(MPI_Reduce(sendbuf, recvbuf, sendcount, sendtype, op->Op, root, comm->Comm));
   return Qnil;
 }
 static VALUE
@@ -462,15 +509,15 @@ rb_comm_allreduce(VALUE self, VALUE rb_sendbuf, VALUE rb_recvbuf, VALUE rb_op)
   struct _Op *op;
   OBJ2C(rb_sendbuf, sendcount, sendbuf, sendtype);
   Data_Get_Struct(self, struct _Comm, comm);
-  check_error(MPI_Comm_rank(comm->comm, &rank));
-  check_error(MPI_Comm_size(comm->comm, &size));
+  check_error(MPI_Comm_rank(comm->Comm, &rank));
+  check_error(MPI_Comm_size(comm->Comm, &size));
   OBJ2C(rb_recvbuf, recvcount, recvbuf, recvtype);
   if (recvcount != sendcount)
     rb_raise(rb_eArgError, "sendbuf and recvbuf has the same length");
   if (recvtype != sendtype)
     rb_raise(rb_eArgError, "sendbuf and recvbuf has the same type");
   Data_Get_Struct(rb_op, struct _Op, op);
-  check_error(MPI_Allreduce(sendbuf, recvbuf, recvcount, recvtype, op->op, comm->comm));
+  check_error(MPI_Allreduce(sendbuf, recvbuf, recvcount, recvtype, op->Op, comm->Comm));
   return Qnil;
 }
 static VALUE
@@ -481,8 +528,8 @@ rb_comm_get_Errhandler(VALUE self)
   VALUE rb_errhandler;
 
   Data_Get_Struct(self, struct _Comm, comm);
-  rb_errhandler = Data_Make_Struct(cErrhandler, struct _Errhandler,
-  MPI_Comm_get_errhandler(comm->comm, &(errhandler->errhandler));
+  rb_errhandler = Data_Make_Struct(cErrhandler, struct _Errhandler, NULL, Errhandler_free, errhandler);
+  MPI_Comm_get_errhandler(comm->Comm, &(errhandler->Errhandler));
   return rb_errhandler;
 }
 static VALUE
@@ -493,7 +540,15 @@ rb_comm_set_Errhandler(VALUE self, VALUE rb_errhandler)
 
   Data_Get_Struct(self, struct _Comm, comm);
   Data_Get_Struct(rb_errhandler, struct _Errhandler, errhandler);
-  MPI_Comm_set_errhandler(comm->comm, errhandler->errhandler);
+  MPI_Comm_set_errhandler(comm->Comm, errhandler->Errhandler);
+  return self;
+}
+static VALUE
+rb_comm_barrier(VALUE self)
+{
+  struct _Comm *comm;
+  Data_Get_Struct(self, struct _Comm, comm);
+  check_error(MPI_Barrier(comm->Comm));
   return self;
 }
 
@@ -505,8 +560,8 @@ rb_request_wait(VALUE self)
   struct _Request *request;
   Data_Get_Struct(self, struct _Request, request);
   status = ALLOC(MPI_Status);
-  check_error(MPI_Wait(&(request->request), status));
-  return Data_Wrap_Struct(cStatus,
+  check_error(MPI_Wait(&(request->Request), status));
+  return Data_Wrap_Struct(cStatus, NULL, Status_free, status);
 }
 
 // MPI::Errhandler
@@ -516,7 +571,7 @@ rb_errhandler_eql(VALUE self, VALUE other)
   struct _Errhandler *eh0, *eh1;
   Data_Get_Struct(self, struct _Errhandler, eh0);
   Data_Get_Struct(other, struct _Errhandler, eh1);
-  return eh0->errhandler == eh1->errhandler ? Qtrue : Qfalse;
+  return eh0->Errhandler == eh1->Errhandler ? Qtrue : Qfalse;
 }
 
 // MPI::Status
@@ -548,8 +603,6 @@ void Init_mpi()
 
   rb_require("narray");
 
-  atexit(_finalize);
-
   // MPI
   mMPI = rb_define_module("MPI");
   rb_define_module_function(mMPI, "Init", rb_m_init, -1);
@@ -557,6 +610,7 @@ void Init_mpi()
   rb_define_const(mMPI, "VERSION", INT2NUM(MPI_VERSION));
   rb_define_const(mMPI, "SUBVERSION", INT2NUM(MPI_SUBVERSION));
   rb_define_const(mMPI, "SUCCESS", INT2NUM(MPI_SUCCESS));
+  rb_define_const(mMPI, "PROC_NULL", INT2NUM(MPI_PROC_NULL));
 
   // MPI::Comm
   cComm = rb_define_class_under(mMPI, "Comm", rb_cObject);
@@ -572,11 +626,13 @@ void Init_mpi()
   rb_define_method(cComm, "Allgather", rb_comm_allgather, 2);
   rb_define_method(cComm, "Bcast", rb_comm_bcast, 2);
   rb_define_method(cComm, "Scatter", rb_comm_scatter, 3);
+  rb_define_method(cComm, "Sendrecv", rb_comm_sendrecv, 6);
   rb_define_method(cComm, "Alltoall", rb_comm_alltoall, 2);
   rb_define_method(cComm, "Reduce", rb_comm_reduce, 4);
   rb_define_method(cComm, "Allreduce", rb_comm_allreduce, 3);
   rb_define_method(cComm, "Errhandler", rb_comm_get_Errhandler, 0);
   rb_define_method(cComm, "Errhandler=", rb_comm_set_Errhandler, 1);
+  rb_define_method(cComm, "Barrier", rb_comm_barrier, 0);
 
   // MPI::Request
   cRequest = rb_define_class_under(mMPI, "Request", rb_cObject);
data/ruby-mpi.gemspec
CHANGED
@@ -5,11 +5,11 @@
 
 Gem::Specification.new do |s|
   s.name = %q{ruby-mpi}
-  s.version = "0.1.0"
+  s.version = "0.2.0"
 
   s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
   s.authors = ["Seiya Nishizawa"]
-  s.date = %q{2011-04-
+  s.date = %q{2011-04-22}
   s.description = %q{A ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages.}
   s.email = %q{seiya@gfd-dennou.org}
   s.extensions = ["ext/mpi/extconf.rb"]
@@ -38,7 +38,7 @@ Gem::Specification.new do |s|
   s.homepage = %q{http://github.com/seiya/ruby-mpi}
   s.licenses = ["MIT"]
   s.require_paths = ["lib"]
-  s.rubygems_version = %q{1.
+  s.rubygems_version = %q{1.7.2}
   s.summary = %q{A ruby binding of MPI}
   s.test_files = [
     "spec/ruby-mpi_spec.rb",
@@ -46,7 +46,6 @@ Gem::Specification.new do |s|
   ]
 
   if s.respond_to? :specification_version then
-    current_version = Gem::Specification::CURRENT_SPECIFICATION_VERSION
     s.specification_version = 3
 
     if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
data/spec/ruby-mpi_spec.rb
CHANGED
@@ -8,49 +8,49 @@ describe "MPI" do
     MPI.Finalize()
   end
 
+  before do
+    @world = MPI::Comm::WORLD
+  end
+
   it "should give version" do
-    MPI.constants.should include("VERSION")
-    MPI.constants.should include("SUBVERSION")
     MPI::VERSION.class.should eql(Fixnum)
     MPI::SUBVERSION.class.should eql(Fixnum)
   end
 
-  it "should
-
-    world
-    world.
-    world.size.class.should eql(Fixnum)
-    world.size.should > 0
+  it "should give rank and size" do
+    @world.rank.class.should eql(Fixnum)
+    @world.size.class.should eql(Fixnum)
+    @world.size.should > 0
   end
 
   it "should send and receive String" do
-    world = MPI::Comm::WORLD
-    message = "Hello from #{world.rank}"
+    rank = @world.rank
+    message = "Hello from #{rank}"
     tag = 0
-    world.Send(message, 0, tag)
-    if world.rank == 0
-      world.size.times do |i|
-        str = " "*
-        status = world.Recv(str, i, tag)
-        status.source.should eql(i)
+    @world.Send(message, 0, tag) if rank != 0
+    if rank == 0
+      (@world.size-1).times do |i|
+        str = " "*"Hello from #{i+1}".length
+        status = @world.Recv(str, i+1, tag)
+        status.source.should eql(i+1)
         status.tag.should eql(tag)
         status.error.should eq(MPI::SUCCESS)
-        str.should match(/\AHello from #{i}/)
+        str.should match(/\AHello from #{i+1}/)
      end
    end
  end
 
   it "should send and receive NArray" do
-    world = MPI::Comm::WORLD
     tag = 0
+    rank = @world.rank
     [NArray[1,2,3], NArray[3.0,2.0,1.0]].each do |ary0|
       ary0 = NArray[1,2,3]
-      world.Send(ary0, 0, tag)
-      if world.rank == 0
-        world.size.times do |i|
+      @world.Send(ary0, 0, tag) if rank != 0
+      if rank == 0
+        (@world.size-1).times do |i|
           ary1 = NArray.new(ary0.typecode, ary0.total)
-          status = world.Recv(ary1, i, tag)
-          status.source.should eql(i)
+          status = @world.Recv(ary1, i+1, tag)
+          status.source.should eql(i+1)
           status.tag.should eql(tag)
           status.error.should eq(MPI::SUCCESS)
           ary1.should == ary0
@@ -60,34 +60,35 @@ describe "MPI" do
     end
   end
 
   it "should send and receive without blocking" do
-    world = MPI::Comm::WORLD
-    message = "Hello from #{world.rank}"
     tag = 0
-
-
-
-
-
-
-
-
-
-
+    rank = @world.rank
+    message = "Hello from #{rank}"
+    if rank != 0
+      request = @world.Isend(message, 0, tag)
+      status = request.Wait
+      # status.source.should eql(rank)
+      status.tag.should eql(tag)
+    end
+    if rank == 0
+      (@world.size-1).times do |i|
+        str = " "*"Hello from #{i+1}".length
+        request_recv = @world.Irecv(str, i+1, tag)
+        status = request_recv.Wait
+        status.source.should eql(i+1)
         status.tag.should eql(tag)
-        str.should match(/\AHello from #{i}/)
+        str.should match(/\AHello from #{i+1}/)
       end
     end
   end
 
   it "should gather data" do
-    world = MPI::Comm::WORLD
-    rank = world.rank
-    size = world.size
+    rank = @world.rank
+    size = @world.size
     root = 0
     bufsize = 2
     sendbuf = rank.to_s*bufsize
     recvbuf = rank == root ? "?"*bufsize*size : nil
-    world.Gather(sendbuf, recvbuf, root)
+    @world.Gather(sendbuf, recvbuf, root)
     if rank == root
       str = ""
       size.times{|i| str << i.to_s*bufsize}
@@ -96,21 +97,19 @@ describe "MPI" do
     end
   end
 
   it "should gather data to all processes (allgather)" do
-    world = MPI::Comm::WORLD
-    rank = world.rank
-    size = world.size
+    rank = @world.rank
+    size = @world.size
     bufsize = 2
     sendbuf = rank.to_s*bufsize
     recvbuf = "?"*bufsize*size
-    world.Allgather(sendbuf, recvbuf)
+    @world.Allgather(sendbuf, recvbuf)
     str = ""
     size.times{|i| str << i.to_s*bufsize}
     recvbuf.should eql(str)
   end
 
   it "should broad cast data (bcast)" do
-    world = MPI::Comm::WORLD
-    rank = world.rank
+    rank = @world.rank
     root = 0
     bufsize = 2
     if rank == root
@@ -118,14 +117,13 @@ describe "MPI" do
     else
       buffer = " "*bufsize
     end
-    world.Bcast(buffer, root)
+    @world.Bcast(buffer, root)
     buffer.should eql(root.to_s*bufsize)
   end
 
   it "should scatter data" do
-    world = MPI::Comm::WORLD
-    rank = world.rank
-    size = world.size
+    rank = @world.rank
+    size = @world.size
     root = 0
     bufsize = 2
     if rank == root
@@ -135,32 +133,50 @@ describe "MPI" do
       sendbuf = nil
     end
     recvbuf = " "*bufsize
-    world.Scatter(sendbuf, recvbuf, root)
+    @world.Scatter(sendbuf, recvbuf, root)
     recvbuf.should eql(rank.to_s*bufsize)
   end
 
+  it "should send and recv data (sendrecv)" do
+    rank = @world.rank
+    size = @world.size
+    dest = rank-1
+    dest = size-1 if dest < 0
+    #dest = MPI::PROC_NULL if dest < 0
+    source = rank+1
+    source = 0 if source > size-1
+    #source = MPI::PROC_NULL if source > size-1
+    sendtag = rank
+    recvtag = source
+    bufsize = 2
+    sendbuf = rank.to_s*bufsize
+    recvbuf = " "*bufsize
+    @world.Sendrecv(sendbuf, dest, sendtag, recvbuf, source, recvtag);
+    if source != MPI::PROC_NULL
+      recvbuf.should eql(source.to_s*bufsize)
+    end
+  end
+
   it "should change data between each others (alltoall)" do
-    world = MPI::Comm::WORLD
-    rank = world.rank
-    size = world.size
+    rank = @world.rank
+    size = @world.size
     bufsize = 2
     sendbuf = rank.to_s*bufsize*size
     recvbuf = "?"*bufsize*size
-    world.Alltoall(sendbuf, recvbuf)
+    @world.Alltoall(sendbuf, recvbuf)
     str = ""
     size.times{|i| str << i.to_s*bufsize}
     recvbuf.should eql(str)
   end
 
   it "should reduce data" do
-    world = MPI::Comm::WORLD
-    rank = world.rank
-    size = world.size
+    rank = @world.rank
+    size = @world.size
     root = 0
     bufsize = 2
     sendbuf = NArray.to_na([rank]*bufsize)
     recvbuf = rank == root ? NArray.new(sendbuf.typecode,bufsize) : nil
-    world.Reduce(sendbuf, recvbuf, MPI::Op::SUM, root)
+    @world.Reduce(sendbuf, recvbuf, MPI::Op::SUM, root)
     if rank == root
       ary = NArray.new(sendbuf.typecode,bufsize).fill(size*(size-1)/2.0)
       recvbuf.should == ary
@@ -168,25 +184,24 @@ describe "MPI" do
     end
   end
 
   it "should reduce data and send to all processes (allreduce)" do
-    world = MPI::Comm::WORLD
-    rank = world.rank
-    size = world.size
+    rank = @world.rank
+    size = @world.size
     bufsize = 2
     sendbuf = NArray.to_na([rank]*bufsize)
     recvbuf = NArray.new(sendbuf.typecode,bufsize)
-    world.Allreduce(sendbuf, recvbuf, MPI::Op::SUM)
+    @world.Allreduce(sendbuf, recvbuf, MPI::Op::SUM)
     ary = NArray.new(sendbuf.typecode,bufsize).fill(size*(size-1)/2.0)
     recvbuf.should == ary
   end
 
-
+  it "should not raise exception in calling barrier" do
+    @world.Barrier
+  end
 
 
   it "shoud raise exeption" do
-    world
-
-    lambda{ world.Send("", world.size+1, 0) }.should raise_error(MPI::ERR::RANK)
-    world.Errhandler.should eql(MPI::Errhandler::ERRORS_RETURN)
+    lambda{ @world.Send("", @world.size+1, 0) }.should raise_error(MPI::ERR::RANK)
+    @world.Errhandler.should eql(MPI::Errhandler::ERRORS_RETURN)
   end
 
 end
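
These specs exercise multi-rank paths (Send/Recv peers, the sendrecv ring), so the suite is meant to run under an MPI launcher; a typical invocation would be something like the following (an assumption, not part of this diff):

  mpirun -np 2 rspec spec/ruby-mpi_spec.rb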
metadata
CHANGED
@@ -1,13 +1,8 @@
 --- !ruby/object:Gem::Specification 
 name: ruby-mpi
 version: !ruby/object:Gem::Version 
-
-
-  segments: 
-  - 0
-  - 1
-  - 0
-  version: 0.1.0
+  prerelease: 
+  version: 0.2.0
 platform: ruby
 authors: 
 - Seiya Nishizawa
@@ -15,85 +10,63 @@ autorequire: 
 bindir: bin
 cert_chain: []
 
-date: 2011-04-
-default_executable: 
+date: 2011-04-22 00:00:00 Z
 dependencies: 
 - !ruby/object:Gem::Dependency 
-  prerelease: false
   name: rspec
-  version_requirements: &id001 !ruby/object:Gem::Requirement 
+  requirement: &id001 !ruby/object:Gem::Requirement 
     none: false
     requirements: 
     - - ~>
       - !ruby/object:Gem::Version 
-        hash: 3
-        segments: 
-        - 2
-        - 3
-        - 0
         version: 2.3.0
-  requirement: *id001
   type: :development
-- !ruby/object:Gem::Dependency 
   prerelease: false
+  version_requirements: *id001
+- !ruby/object:Gem::Dependency 
   name: bundler
-  version_requirements: &id002 !ruby/object:Gem::Requirement 
+  requirement: &id002 !ruby/object:Gem::Requirement 
     none: false
     requirements: 
     - - ~>
       - !ruby/object:Gem::Version 
-        hash: 23
-        segments: 
-        - 1
-        - 0
-        - 0
         version: 1.0.0
-  requirement: *id002
   type: :development
-- !ruby/object:Gem::Dependency 
   prerelease: false
+  version_requirements: *id002
+- !ruby/object:Gem::Dependency 
   name: jeweler
-  version_requirements: &id003 !ruby/object:Gem::Requirement 
+  requirement: &id003 !ruby/object:Gem::Requirement 
     none: false
     requirements: 
     - - ~>
       - !ruby/object:Gem::Version 
-        hash: 7
-        segments: 
-        - 1
-        - 5
-        - 2
         version: 1.5.2
-  requirement: *id003
   type: :development
-- !ruby/object:Gem::Dependency 
   prerelease: false
+  version_requirements: *id003
+- !ruby/object:Gem::Dependency 
   name: rcov
-  version_requirements: &id004 !ruby/object:Gem::Requirement 
+  requirement: &id004 !ruby/object:Gem::Requirement 
     none: false
     requirements: 
     - - ">="
       - !ruby/object:Gem::Version 
-        hash: 3
-        segments: 
-        - 0
         version: "0"
-  requirement: *id004
   type: :development
-- !ruby/object:Gem::Dependency 
   prerelease: false
+  version_requirements: *id004
+- !ruby/object:Gem::Dependency 
   name: narray
-  version_requirements: &id005 !ruby/object:Gem::Requirement 
+  requirement: &id005 !ruby/object:Gem::Requirement 
     none: false
     requirements: 
     - - ">="
       - !ruby/object:Gem::Version 
-        hash: 3
-        segments: 
-        - 0
         version: "0"
-  requirement: *id005
   type: :development
+  prerelease: false
+  version_requirements: *id005
 description: A ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages.
 email: seiya@gfd-dennou.org
 executables: []
@@ -120,7 +93,6 @@ files: 
 - samples/narray.rb
 - spec/ruby-mpi_spec.rb
 - spec/spec_helper.rb
-has_rdoc: true
 homepage: http://github.com/seiya/ruby-mpi
 licenses: 
 - MIT
@@ -134,7 +106,7 @@ required_ruby_version: !ruby/object:Gem::Requirement 
   requirements: 
   - - ">="
     - !ruby/object:Gem::Version 
-      hash:
+      hash: -3549605766415082841
       segments: 
      - 0
       version: "0"
@@ -143,14 +115,11 @@ required_rubygems_version: !ruby/object:Gem::Requirement 
   requirements: 
   - - ">="
     - !ruby/object:Gem::Version 
-      hash: 3
-      segments: 
-      - 0
       version: "0"
 requirements: []
 
 rubyforge_project: 
-rubygems_version: 1.
+rubygems_version: 1.7.2
 signing_key: 
 specification_version: 3
 summary: A ruby binding of MPI