ruby-mpi 0.3.2 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/LICENSE.txt +1 -1
- data/README.rdoc +3 -4
- data/Rakefile +1 -1
- data/VERSION +1 -1
- data/ext/mpi/mpi.c +800 -7
- data/ruby-mpi.gemspec +30 -35
- data/samples/kmeans.rb +137 -0
- data/samples/pi.rb +59 -0
- data/spec/ruby-mpi_spec.rb +407 -14
- metadata +6 -5
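The bulk of the release is the ~800 new lines in data/ext/mpi/mpi.c plus two new sample programs, samples/kmeans.rb and samples/pi.rb, reproduced below. As a rough sketch of trying the release (assuming a working MPI installation with the mpirun launcher on PATH, and paths relative to an unpacked copy of the gem):

  gem install ruby-mpi -v 0.4.0            # builds the C extension via ext/mpi/extconf.rb
  mpirun -np 2 ruby samples/pi.rb 1000000
  mpirun -np 4 ruby samples/kmeans.rb 10000 8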
data/ruby-mpi.gemspec
CHANGED
@@ -2,20 +2,20 @@
 # DO NOT EDIT THIS FILE DIRECTLY
 # Instead, edit Jeweler::Tasks in Rakefile, and run 'rake gemspec'
 # -*- encoding: utf-8 -*-
-# stub: ruby-mpi 0.3.2 ruby lib
+# stub: ruby-mpi 0.4.0 ruby lib
 # stub: ext/mpi/extconf.rb

 Gem::Specification.new do |s|
-  s.name = "ruby-mpi"
-  s.version = "0.3.2"
+  s.name = "ruby-mpi".freeze
+  s.version = "0.4.0"

-  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
-  s.require_paths = ["lib"]
-  s.authors = ["Seiya Nishizawa"]
-  s.date = "
-  s.description = "A ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages."
-  s.email = "seiya@gfd-dennou.org"
-  s.extensions = ["ext/mpi/extconf.rb"]
+  s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
+  s.require_paths = ["lib".freeze]
+  s.authors = ["Seiya Nishizawa".freeze]
+  s.date = "2024-11-22"
+  s.description = "A ruby binding of Message Passing Interface (MPI), which is an API specification that allows processes to communicate with one another by sending and receiving messages.".freeze
+  s.email = "seiya@gfd-dennou.org".freeze
+  s.extensions = ["ext/mpi/extconf.rb".freeze]
   s.extra_rdoc_files = [
     "LICENSE.txt",
     "README.rdoc"
@@ -35,42 +35,37 @@ Gem::Specification.new do |s|
     "lib/mpi/utils.rb",
     "ruby-mpi.gemspec",
     "samples/hello.rb",
+    "samples/kmeans.rb",
     "samples/narray.rb",
     "samples/narray_offset.rb",
+    "samples/pi.rb",
     "spec/ruby-mpi_spec.rb",
     "spec/spec_helper.rb",
     "test/test_utils.rb"
   ]
-  s.homepage = "http://github.com/gfd-dennou-club/ruby-mpi"
-  s.licenses = ["MIT"]
-  s.rubygems_version = "2.
-  s.summary = "A ruby binding of MPI"
+  s.homepage = "http://github.com/gfd-dennou-club/ruby-mpi".freeze
+  s.licenses = ["MIT".freeze]
+  s.rubygems_version = "3.2.5".freeze
+  s.summary = "A ruby binding of MPI".freeze

   if s.respond_to? :specification_version then
     s.specification_version = 4
+  end

-    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
-      s.add_runtime_dependency(%q<numru-narray>, ["~> 1.0"])
-      s.add_development_dependency(%q<rspec>, [">= 2.3.0"])
-      s.add_development_dependency(%q<bundler>, [">= 1.0.0"])
-      s.add_development_dependency(%q<jeweler>, [">= 1.5.2"])
-      s.add_development_dependency(%q<simplecov>, [">= 0"])
-      s.add_development_dependency(%q<rake-compiler>, [">= 0"])
-    else
-      s.add_dependency(%q<numru-narray>, ["~> 1.0"])
-      s.add_dependency(%q<rspec>, [">= 2.3.0"])
-      s.add_dependency(%q<bundler>, [">= 1.0.0"])
-      s.add_dependency(%q<jeweler>, [">= 1.5.2"])
-      s.add_dependency(%q<simplecov>, [">= 0"])
-      s.add_dependency(%q<rake-compiler>, [">= 0"])
-    end
+  if s.respond_to? :add_runtime_dependency then
+    s.add_runtime_dependency(%q<numru-narray>.freeze, ["~> 1.0"])
+    s.add_development_dependency(%q<rspec>.freeze, [">= 2.3.0"])
+    s.add_development_dependency(%q<bundler>.freeze, [">= 1.0.0"])
+    s.add_development_dependency(%q<jeweler>.freeze, [">= 1.5.2"])
+    s.add_development_dependency(%q<simplecov>.freeze, [">= 0"])
+    s.add_development_dependency(%q<rake-compiler>.freeze, [">= 0"])
   else
-    s.add_dependency(%q<numru-narray>, ["~> 1.0"])
-    s.add_dependency(%q<rspec>, [">= 2.3.0"])
-    s.add_dependency(%q<bundler>, [">= 1.0.0"])
-    s.add_dependency(%q<jeweler>, [">= 1.5.2"])
-    s.add_dependency(%q<simplecov>, [">= 0"])
-    s.add_dependency(%q<rake-compiler>, [">= 0"])
+    s.add_dependency(%q<numru-narray>.freeze, ["~> 1.0"])
+    s.add_dependency(%q<rspec>.freeze, [">= 2.3.0"])
+    s.add_dependency(%q<bundler>.freeze, [">= 1.0.0"])
+    s.add_dependency(%q<jeweler>.freeze, [">= 1.5.2"])
+    s.add_dependency(%q<simplecov>.freeze, [">= 0"])
+    s.add_dependency(%q<rake-compiler>.freeze, [">= 0"])
   end
 end

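The gemspec was regenerated with a newer toolchain (rubygems_version 3.2.5): string literals are now frozen, and the flat add_dependency list is replaced by an explicit split into one runtime dependency (numru-narray ~> 1.0) and development-only dependencies (rspec, bundler, jeweler, simplecov, rake-compiler), so a plain gem install now pulls in only numru-narray. A minimal consumer-side sketch (the Gemfile below is illustrative, not part of the package):

  # Gemfile of a hypothetical project using the new release
  source "https://rubygems.org"

  gem "ruby-mpi", "~> 0.4.0"   # numru-narray ~> 1.0 comes along as its runtime dependency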
data/samples/kmeans.rb
ADDED
@@ -0,0 +1,137 @@
+# Run this program using
+#
+# mpirun -np numprocs ruby kmeans.rb numpoints numclusters
+#
+# where numprocs, numpoints and numclusters are integers
+# and numprocs <= numpoints and numclusters <= numpoints
+#
+# Parallelization assumes that numclusters << numpoints
+# and that numprocs << numpoints
+#
+# The program is based on the description at
+# https://en.wikipedia.org/wiki/K-means_clustering
+
+require "mpi"
+if defined?(NumRu::NArray)
+  include NumRu
+end
+
+def generate_points count
+  x = NArray.float(count).random
+  y = NArray.float(count).random
+  return x , y
+end
+
+MPI.Init
+
+world = MPI::Comm::WORLD
+
+size = world.size
+rank = world.rank
+
+def usage(rank)
+  if rank==0
+    print <<EOF
+Usage: mpirun -np numproc ruby #$0 numpoints numclusters
+
+numpoints and numclusters must be integers > 0
+numclusters <= numpoints and numproc <= numpoints
+EOF
+  end
+  MPI.Finalize
+  exit -1
+end
+
+usage(rank) if ARGV.length != 2
+usage(rank) if ( ( /^\d+$/ =~ ARGV[0] ) != 0)
+usage(rank) if ( ( /^\d+$/ =~ ARGV[1] ) != 0)
+n_points = ARGV[0].to_i
+n_clusters = ARGV[1].to_i
+usage(rank) unless n_points > size
+usage(rank) unless n_clusters > 0
+usage(rank) unless n_points >= n_clusters
+
+my_points = n_points.div(size)
+if ( n_points % size > rank )
+  my_points += 1
+end
+
+cluster_x = NArray.float(n_clusters)
+cluster_y = NArray.float(n_clusters)
+my_cluster = NArray.int(my_points)
+min_distance = NArray.float(my_points)
+distance = NArray.float(n_clusters)
+cluster_member_count = NArray.int(n_clusters)
+total_cluster_x_sum = NArray.float(n_clusters)
+total_cluster_y_sum = NArray.float(n_clusters)
+total_cluster_member_count = NArray.int(n_clusters)
+my_cluster_x = NArray.float(n_clusters)
+my_cluster_y = NArray.float(n_clusters)
+my_cluster_member_count = NArray.int(n_clusters)
+my_energy = NArray.float(1)
+total_energy = NArray.float(1)
+random_x = NArray.float(n_clusters)
+random_y = NArray.float(n_clusters)
+
+my_x, my_y = generate_points my_points
+if rank == 0
+  cluster_x, cluster_y = generate_points n_clusters
+end
+world.Bcast(cluster_x,0)
+world.Bcast(cluster_y,0)
+
+iter = 0
+# Do 10 iterations for testing purposes
+# in practice would use some convergence
+# criteria
+while iter < 10 do
+  # Find cluster and calculate energy
+  i = 0
+  my_energy[0] = 0
+  while i < my_points do
+    distance = ( cluster_x - my_x[i] )**2 + ( cluster_y - my_y[i] )**2
+    min_distance = distance.min
+    my_energy[0] += min_distance
+    # If multiple minimum values, take the first one
+    my_cluster[i] = distance.eq(min_distance).where[0]
+    i +=1
+  end
+  world.Allreduce(my_energy,total_energy,MPI::Op::SUM)
+  if rank == 0
+    p total_energy[0]
+  end
+  # Find new cluster centroids
+  j = 0
+  while j < n_clusters do
+    mask = my_cluster.eq(j)
+    my_cluster_member_count[j] = mask.count_true
+    if mask.any?
+      my_cluster_x[j] = (my_x[mask]).sum
+      my_cluster_y[j] = (my_y[mask]).sum
+    end
+    j +=1
+  end
+  world.Allreduce(my_cluster_member_count,total_cluster_member_count,MPI::Op::SUM)
+  world.Allreduce(my_cluster_x,total_cluster_x_sum,MPI::Op::SUM)
+  world.Allreduce(my_cluster_y,total_cluster_y_sum,MPI::Op::SUM)
+  # If a cluster is empty, choose a random point to try
+  no_members = total_cluster_member_count.eq(0)
+  if no_members.any?
+    if rank == 0
+      random_x, random_y = generate_points no_members.count_true
+      total_cluster_member_count[no_members]= 1
+      total_cluster_x_sum[no_members] = random_x
+      total_cluster_y_sum[no_members] = random_y
+      cluster_x = total_cluster_x_sum / total_cluster_member_count
+      cluster_y = total_cluster_y_sum / total_cluster_member_count
+    end
+    world.Bcast(cluster_x,0)
+    world.Bcast(cluster_y,0)
+  else
+    cluster_x = total_cluster_x_sum / total_cluster_member_count
+    cluster_y = total_cluster_y_sum / total_cluster_member_count
+  end
+  iter += 1
+end
+
+MPI.Finalize
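kmeans.rb keeps the data points distributed and only ever communicates the small per-cluster aggregates: rank 0 draws the initial centroids and Bcasts them, then each iteration Allreduces the per-rank member counts and coordinate sums so every rank can recompute the centroids locally, with empty clusters reseeded from fresh random points on rank 0. A minimal sketch of that Allreduce pattern in isolation, assuming only the ruby-mpi calls already exercised by the sample:

  require "mpi"
  if defined?(NumRu::NArray)
    include NumRu
  end

  MPI.Init
  world = MPI::Comm::WORLD
  local = NArray[world.rank + 1]    # each rank contributes rank+1
  total = NArray[0]
  # SUM-reduce across all ranks; every rank receives the result
  world.Allreduce(local, total, MPI::Op::SUM)
  p total[0] if world.rank == 0     # with -np 4 this prints 10 (1+2+3+4)
  MPI.Finalize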
data/samples/pi.rb
ADDED
@@ -0,0 +1,59 @@
+# Run this program using
+#
+# mpirun -np 2 ruby pi.rb numpoints
+#
+# where numpoints is an integer
+#
+# The program is based on an example from
+# https://carpentries-incubator.github.io/hpc-intro/16-parallel/index.html
+
+require "mpi"
+if defined?(NumRu::NArray)
+  include NumRu
+end
+
+def inside_circle my_count
+  x = NArray.float(my_count).random
+  y = NArray.float(my_count).random
+  a = ((x**2 + y**2) < 1.0)
+  return a.count_true
+end
+
+MPI.Init
+
+world = MPI::Comm::WORLD
+
+size = world.size
+rank = world.rank
+
+def usage(rank)
+  if rank==0
+    print <<EOF
+Usage: mpirun -np numproc ruby #$0 numpoints
+numpoints must be an integer > 0
+EOF
+  end
+  MPI.Finalize
+  exit -1
+end
+usage(rank) if ARGV.length != 1
+usage(rank) if ( ( /^\d+$/ =~ ARGV[0] ) != 0)
+n_samples = ARGV[0].to_i
+usage(rank) unless n_samples > 0
+
+my_samples = n_samples.div(size)
+if ( n_samples % size > rank )
+  my_samples = my_samples + 1
+end
+
+my_count = NArray[0]
+count = NArray[0]
+
+my_count[0] = inside_circle my_samples
+
+world.Reduce(my_count,count,MPI::Op::SUM,0)
+if ( rank == 0 )
+  p "Pi is approximately " + ((count[0]*4.0)/(1.0*n_samples)).to_s
+end
+
+MPI.Finalize
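pi.rb is the classic Monte Carlo estimator: points drawn uniformly in the unit square land inside the quarter circle x**2 + y**2 < 1 with probability pi/4, so count*4.0/n_samples converges on pi. The samples are split as evenly as possible (n_samples.div(size) each, with the first n_samples % size ranks taking one extra), and a single-element Reduce gathers the hit counts on rank 0. A run along the lines of the header comment (process count illustrative):

  mpirun -np 2 ruby pi.rb 1000000
  # about 1000000 * pi/4 = 785398 hits expected,
  # printing roughly "Pi is approximately 3.141..."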