forkandreturn 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +15 -0
- data/README +11 -0
- data/VERSION +1 -0
- data/lib/forkandreturn/enumerable.rb +147 -0
- data/lib/forkandreturn/exceptions.rb +4 -0
- data/lib/forkandreturn/forkandreturn.rb +126 -0
- data/lib/forkandreturn/util.rb +74 -0
- data/lib/forkandreturn.rb +6 -0
- data/test/test.rb +253 -0
- metadata +75 -0
data/LICENSE
ADDED
@@ -0,0 +1,15 @@
+# Copyright Erik Veenstra <fork_and_return@erikveen.dds.nl>
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied
+# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+# PURPOSE. See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with this program; if not, write to the Free
+# Software Foundation, Inc., 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA.
data/README
ADDED
@@ -0,0 +1,11 @@
+ForkAndReturn implements a couple of methods that simplify
+running a block of code in a subprocess. The result (Ruby
+object or exception) of the block will be available in the
+parent process.
+
+ForkAndReturn uses Process.fork(), so it only runs on platforms
+where Process.fork() is implemented.
+
+ForkAndReturn implements the low-level stuff. Enumerable is
+enriched with some methods which should be used instead of
+ForkAndReturn under normal circumstances.
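
A minimal usage sketch of what the README describes (not part of the package; it assumes the gem is installed and that the platform supports Process.fork()):

  require "forkandreturn"

  # Each element is handled in its own forked child process;
  # the parent collects the Marshal'led results.
  doubled = [1, 2, 3, 4].concurrent_collect do |n|
    2 * n
  end

  p doubled   # => [2, 4, 6, 8]
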
data/VERSION
ADDED
@@ -0,0 +1 @@
+0.1.0
data/lib/forkandreturn/enumerable.rb
ADDED
@@ -0,0 +1,147 @@
+module Enumerable
+  # For each object in the enumeration, call the block in a separate process, pass the object to the block and collect the results of the blocks.
+  # It must be one of the easiest ways of parallel processing for Ruby.
+  #
+  # Example:
+  #
+  #   [1, 2, 3, 4].concurrent_collect do |object|
+  #     2*object
+  #   end    # ===> [2, 4, 6, 8]
+  #
+  # This runs each "2*object" in a separate process.
+  # Hopefully, the processes are spread over all available CPUs.
+  # That's a simple way of parallel processing!
+  #
+  # Note that the code in the block is run in a separate process, so updating objects and variables in the block won't affect the parent process:
+  #
+  #   count = 0
+  #   [...].concurrent_collect do
+  #     count += 1
+  #   end
+  #   count    # ==> 0
+  #
+  # concurrent_collect() is suitable for handling a couple of very CPU-intensive jobs, like parsing large XML files.
+
+  def concurrent_collect(max_concurrent_workers=-1, &block)
+    max_concurrent_workers = 0 unless ForkAndReturn::Util.multi_core? and ForkAndReturn::Util.forkable?
+
+    case
+    when max_concurrent_workers < 0 # No limit.
+      self.collect do |object|
+        ForkAndReturn.fork_and_return_core do
+          if block.arity > 1 and object.kind_of?(Enumerable)
+            yield(*object.to_a)
+          else
+            yield(object)
+          end
+        end
+      end.collect do |wait|
+        wait.call
+      end.collect do |load|
+        load.call
+      end.collect do |result|
+        result.call
+      end
+    when max_concurrent_workers == 0 # No fork.
+      self.collect(&block)
+    when max_concurrent_workers > 0
+      self.threaded_collect(max_concurrent_workers) do |object|
+        ForkAndReturn.fork_and_return_core do
+          if block.arity > 1 and object.kind_of?(Enumerable)
+            yield(*object.to_a)
+          else
+            yield(object)
+          end
+        end.call
+      end.collect do |load|
+        load.call
+      end.collect do |result|
+        result.call
+      end
+    end
+  end
+
+  # In clustered_concurrent_collect(), all objects in the enumeration are clustered.
+  # Each cluster is then handled in a separate process. Compare this to concurrent_collect(), where each object is handled in a separate process.
+  #
+  # However, the caller won't be aware of the clusters: the interface is exactly the same as concurrent_collect() and Enumerable.collect().
+  #
+  # clustered_concurrent_collect() is suitable for handling a lot of not too CPU-intensive jobs.
+
+  def clustered_concurrent_collect(number_of_clusters=ForkAndReturn::Util.cores, &block)
+    number_of_clusters = 0 unless ForkAndReturn::Util.multi_core? and ForkAndReturn::Util.forkable?
+
+    if number_of_clusters < 1
+      self.concurrent_collect(number_of_clusters, &block)
+    else
+      clusters = [] # One cluster per thread.
+      last_pos = nil
+      res = []
+
+      self.each_with_index do |object, pos|
+        (clusters[pos%number_of_clusters] ||= []) << object
+
+        last_pos = pos
+      end
+
+      clusters.concurrent_collect(-1) do |cluster|
+        cluster.collect do |object|
+          if block.arity > 1 and object.kind_of?(Enumerable)
+            yield(*object.to_a)
+          else
+            yield(object)
+          end
+        end + (cluster.length == clusters[0].length ? [] : [nil]) # Add padding nil, in order to be able to transpose.
+      end.transpose.each do |array|
+        res.concat(array)
+      end
+
+      res[0..last_pos] # Remove padding nils.
+    end
+  end
+
+  # Like concurrent_collect(), but it's "select" instead of "collect".
+
+  def concurrent_select(*args, &block)
+    self.zip(self.concurrent_collect(*args, &block)).inject([]){|r, (o, b)| r << o if b ; r}
+  end
+
+  # Like concurrent_collect(), but it's "reject" instead of "collect".
+
+  def concurrent_reject(*args, &block)
+    self.zip(self.concurrent_collect(*args, &block)).inject([]){|r, (o, b)| r << o unless b ; r}
+  end
+
+  # Like concurrent_collect(), but it's "each" instead of "collect".
+
+  def concurrent_each(*args, &block)
+    concurrent_collect(*args, &block)
+
+    self
+  end
+
+  # Like clustered_concurrent_collect(), but it's "select" instead of "collect".
+
+  def clustered_concurrent_select(*args, &block)
+    self.zip(self.clustered_concurrent_collect(*args, &block)).inject([]){|r, (o, b)| r << o if b ; r}
+  end
+
+  # Like clustered_concurrent_collect(), but it's "reject" instead of "collect".
+
+  def clustered_concurrent_reject(*args, &block)
+    self.zip(self.clustered_concurrent_collect(*args, &block)).inject([]){|r, (o, b)| r << o unless b ; r}
+  end
+
+  # Like clustered_concurrent_collect(), but it's "each" instead of "collect".
+
+  def clustered_concurrent_each(*args, &block)
+    clustered_concurrent_collect(*args, &block)
+
+    self
+  end
+
+  alias concurrent concurrent_collect
+  alias concurrent_map concurrent_collect
+  alias clustered_concurrent clustered_concurrent_collect
+  alias clustered_concurrent_map clustered_concurrent_collect
+end
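
A short sketch of the Enumerable helpers defined in this file (not part of the package; the inputs and expected outputs are illustrative, and it assumes the gem plus its threadlimiter dependency are installed):

  require "forkandreturn"

  # Keep only the elements whose block returns a truthy value;
  # each check runs in its own forked child.
  evens = (1..8).concurrent_select { |n| n % 2 == 0 }
  p evens           # => [2, 4, 6, 8]

  # Spread many small jobs over roughly one cluster per CPU core,
  # instead of forking one child per element.
  sums = (1..1000).clustered_concurrent_collect { |n| n + 1 }
  p sums.first(3)   # => [2, 3, 4]
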
data/lib/forkandreturn/forkandreturn.rb
ADDED
@@ -0,0 +1,126 @@
+# ForkAndReturn implements a couple of methods that simplify running a block of code in a subprocess.
+# The result (Ruby object or exception) of the block will be available in the parent process.
+#
+# The intermediate return value (or exception) will be Marshal'led to disk.
+# This means that it is possible to (concurrently) run thousands of child processes, with a relatively low memory footprint.
+# Just gather the results once all child processes are done.
+# ForkAndReturn will handle the writing, reading and deleting of the temporary file.
+#
+# The core of these methods is fork_and_return_core().
+# It returns some nested lambdas, which are handled by the other methods and by Enumerable#concurrent_collect().
+# These lambdas handle the WAITing, LOADing and RESULTing (explained in fork_and_return_core()).
+#
+# The child process exits with Process.exit!(), so at_exit() blocks are skipped in the child process.
+# However, both $stdout and $stderr will be flushed.
+#
+# Only Marshal'lable Ruby objects can be returned.
+#
+# ForkAndReturn uses Process.fork(), so it only runs on platforms where Process.fork() is implemented.
+
+module ForkAndReturn
+  # Fork a new process and run the block of code within that process.
+  #
+  # The WAITing, LOADing and RESULTing (explained in fork_and_return_core()) will be performed immediately and the return value of the block will be returned.
+  #
+  # Example:
+  #
+  #   [1, 2, 3, 4].collect do |object|
+  #     Thread.fork do
+  #       ForkAndReturn.fork_and_return do
+  #         2*object
+  #       end
+  #     end
+  #   end.collect do |thread|
+  #     thread.value
+  #   end    # ===> [2, 4, 6, 8]
+  #
+  # This runs each "2*object" in a separate process.
+  # Hopefully, the processes are spread over all available CPUs.
+  # That's a simple way of parallel processing!
+  # (Although Enumerable#concurrent_collect() is even simpler...)
+  #
+  # <i>*args</i> is passed to the block.
+
+  def self.fork_and_return(*args, &block)
+    wait = fork_and_return_core(*args, &block)
+
+    wait.call.call.call
+  end
+
+  # Fork a new process and run the block of code within that process.
+  #
+  # Returns a lambda.
+  # If you call it, the WAITing, LOADing and RESULTing (explained in fork_and_return_core()) will be performed in one go.
+  #
+  # <i>*args</i> is passed to the block.
+
+  def self.fork_and_return_later(*args, &block)
+    wait = fork_and_return_core(*args, &block)
+
+    lambda{wait.call.call.call}
+  end
+
+  # Fork a new process and run the block of code within that process.
+  #
+  # Returns some nested lambdas:
+  # The first lambda is the WAIT-lambda.
+  # If you call the WAIT-lambda, you're going to wait for the child process to finish.
+  # The WAIT-lambda returns the LOAD-lambda.
+  # If you call the LOAD-lambda, the result of the child process (the return value
+  # or the exception) will be loaded from the temporary file into memory and the temporary file will be deleted.
+  # The LOAD-lambda returns the RESULT-lambda.
+  # If you call the RESULT-lambda, the result of the child process will be handled.
+  # This means either "return the return value of the block" or "raise the exception".
+  #
+  # <i>*args</i> is passed to the block.
+
+  def self.fork_and_return_core(*args, &block)
+    file = Util.tempfile
+
+    #begin
+      pid =
+        Process.fork do
+          begin
+            ok, res = true, yield(*args)
+          rescue
+            ok, res = false, $!
+          end
+
+          File.open(file, "wb"){|f| Marshal.dump([ok, res], f)}
+
+          $stdout.flush
+          $stderr.flush
+
+          Process.exit! # To avoid the execution of at_exit handlers.
+        end
+    #rescue Errno::EAGAIN # Resource temporarily unavailable - fork(2)
+    #  Kernel.sleep 0.1
+
+    #  retry # TODO: Reconsider.
+    #end
+
+    lambda do # Wait for the result.
+      Process.wait(pid) # To avoid zombies.
+
+      lambda do # Load the result and delete the temp file.
+        begin
+          ok, res = File.open(file, "rb"){|f| Marshal.load(f)}
+        rescue Errno::ENOENT # No such file or directory
+          ok, res = false, WorkerError.new("the worker hasn't returned a result")
+        rescue EOFError # end of file reached
+          ok, res = false, WorkerError.new("the worker hasn't returned a result")
+        rescue TypeError # can't be read
+          ok, res = false, WorkerError.new("the worker has returned corrupt data")
+        ensure
+          File.delete(file) if File.file?(file)
+        end
+
+        lambda do # Handle the result.
+          raise res unless ok
+
+          res
+        end
+      end
+    end
+  end
+end
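
A sketch of driving the nested WAIT/LOAD/RESULT lambdas by hand, as described in the comments above (not part of the package; it mirrors the pattern used in test/test.rb):

  require "forkandreturn"

  wait = ForkAndReturn.fork_and_return_core { Process.pid }

  load   = wait.call     # blocks until the child exits (and reaps it)
  result = load.call     # reads the Marshal'led temp file and deletes it
  value  = result.call   # returns the block's value, or re-raises the child's exception

  p value != Process.pid   # => true (the block ran in the child process)
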
data/lib/forkandreturn/util.rb
ADDED
@@ -0,0 +1,74 @@
+module ForkAndReturn
+  module Util # :nodoc: Stuff copied from my personal library.
+    def self.multi_core?
+      @multi_core ||= cores > 1
+    end
+
+    def self.cores
+      @cores ||= (l = cpu_info.length) < 1 ? 1 : l
+    end
+
+    def self.cpu_info # TODO: Might not be correct, but it works for now.
+      @cpu_info ||=
+        begin
+          cpus = []
+
+          if File.file?("/proc/cpuinfo")
+            File.open("/proc/cpuinfo") do |f|
+              while line = f.gets
+                key, value = line.chomp.split(/\s*:\s*/, 2)
+
+                if key and value
+                  cpus << {} if key == "processor"
+
+                  cpus[-1][key] = value
+                end
+              end
+            end
+          end
+
+          cpus
+        end
+    end
+
+    def self.forkable?
+      @forkable ||=
+        begin
+          Process.wait(
+            Process.fork do
+              Process.exit!
+            end
+          )
+
+          true
+        rescue NotImplementedError
+          false
+        end
+    end
+
+    def self.generate_counter(count=0)
+      fun =
+        lambda do
+          Thread.exclusive do
+            count = count.succ
+          end
+        end
+
+      class << fun
+        alias next call
+      end
+
+      fun
+    end
+
+    @tempfile_counter = Util.generate_counter
+
+    def self.tempfile
+      File.join(tempdir, "%s.%d.%d.tmp" % ["fork_and_return", $$, @tempfile_counter.next])
+    end
+
+    def self.tempdir
+      [ENV["TMPDIR"], ENV["TMP"], ENV["TEMP"], "/tmp", "c:/temp"].compact.find{|dir| File.directory?(dir)}
+    end
+  end
+end
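
These helpers are internal (:nodoc:), but a rough sketch of what they return (not part of the package; actual values depend on the machine, and it assumes the top-level require loads this file):

  require "forkandreturn"

  ForkAndReturn::Util.forkable?     # => true on platforms where Process.fork() works
  ForkAndReturn::Util.cores         # => number of processors found in /proc/cpuinfo (1 if none found)
  ForkAndReturn::Util.multi_core?   # => true when more than one core is detected
  ForkAndReturn::Util.tempfile      # => a path like "<tempdir>/fork_and_return.<pid>.<counter>.tmp"
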
data/test/test.rb
ADDED
@@ -0,0 +1,253 @@
+require "test/unit"
+require "forkandreturn"
+
+class ForkAndReturnTest < Test::Unit::TestCase
+  class ForkAndReturnTestException < StandardError
+  end
+
+  def test_fork_and_return
+    a = 1
+    b = 2
+    c = 3
+    r = ForkAndReturn.fork_and_return{[a, [b, c]]}
+
+    assert_equal([1, [2, 3]], r)
+  end
+
+  def test_fork_and_return_later
+    a = 1
+    b = 2
+    c = 3
+    r = ForkAndReturn.fork_and_return_later{[a, [b, c]]}
+
+    assert_equal([1, [2, 3]], r.call)
+    assert_equal(Proc, r.class)
+  end
+
+  def test_fork_and_return_lambda
+    a = 1
+    b = 2
+    c = 3
+    wait = ForkAndReturn.fork_and_return_core{[a, [b, c]]}
+    assert_kind_of(Proc, wait)
+    load = wait.call
+    assert_kind_of(Proc, load)
+    result = load.call
+    assert_kind_of(Proc, result)
+    r = result.call
+    assert_equal([1, [2, 3]], r)
+  end
+
+  def test_fork_and_return_exception
+    assert_raise(ForkAndReturnTestException) do
+      ForkAndReturn.fork_and_return do
+        raise ForkAndReturnTestException
+      end
+    end
+  end
+
+  def test_fork_and_return_later_exception
+    later =
+      ForkAndReturn.fork_and_return_later do
+        raise ForkAndReturnTestException
+      end
+
+    assert_equal(Proc, later.class)
+
+    assert_raise(ForkAndReturnTestException) do
+      later.call
+    end
+  end
+
+  def test_fork_and_return_lambda_exception
+    wait =
+      ForkAndReturn.fork_and_return_core do
+        raise ForkAndReturnTestException
+      end
+
+    load = wait.call
+    result = load.call
+
+    assert_raise(ForkAndReturnTestException) do
+      result.call
+    end
+  end
+
+  def test_fork_and_return_exit
+    wait =
+      ForkAndReturn.fork_and_return_core do
+        exit 1
+      end
+
+    load = wait.call
+    result = load.call
+
+    assert_raise(ForkAndReturn::WorkerError) do
+      result.call
+    end
+  end
+end
+
+class ForkAndReturnEnumerableTest < Test::Unit::TestCase
+  class ForkAndReturnEnumerableTestException < StandardError
+  end
+
+  def test_array
+    data = (1..10).to_a
+    block = lambda{|n| n**n}
+    result = data.collect(&block)
+
+    assert_equal(result, data.concurrent_collect(0, &block))
+    assert_equal(result, data.concurrent_collect(3, &block))
+    assert_equal(result, data.concurrent_collect(-1, &block))
+  end
+
+  def test_array_of_array
+    data = (1..10).zip(51..60)
+    block = lambda{|x, y| [x, y, x**y]}
+    result = data.collect(&block)
+
+    assert_equal(result, data.concurrent_collect(0, &block))
+    assert_equal(result, data.concurrent_collect(3, &block))
+    assert_equal(result, data.concurrent_collect(-1, &block))
+  end
+
+  def test_hash
+    data = Hash[*(1..10).zip(51..60).flatten]
+    block = lambda{|x, y| [x, y, x**y]}
+    result = data.collect(&block)
+
+    assert_equal(result, data.concurrent_collect(0, &block))
+    assert_equal(result, data.concurrent_collect(3, &block))
+    assert_equal(result, data.concurrent_collect(-1, &block))
+  end
+
+  def test_range
+    data = 1..10
+    block = lambda{|n| n**n}
+    result = data.collect(&block)
+
+    assert_equal(result, data.concurrent_collect(0, &block))
+    assert_equal(result, data.concurrent_collect(3, &block))
+    assert_equal(result, data.concurrent_collect(-1, &block))
+  end
+
+  def test_select
+    data = (1..10).zip(51..60)
+    block = lambda{|x, y| y%x==0}
+    result = data.select(&block)
+
+    assert_equal(result, data.concurrent_select(0, &block))
+    assert_equal(result, data.concurrent_select(3, &block))
+    assert_equal(result, data.concurrent_select(-1, &block))
+  end
+
+  def test_reject
+    data = (1..10).zip(51..60)
+    block = lambda{|x, y| y%x==0}
+    result = data.reject(&block)
+
+    assert_equal(result, data.concurrent_reject(0, &block))
+    assert_equal(result, data.concurrent_reject(3, &block))
+    assert_equal(result, data.concurrent_reject(-1, &block))
+  end
+
+  def test_each_as_well_as_scope
+    data = (1..10).zip(51..60)
+    count = nil
+    block = lambda{|x, y| count += x + y}
+
+    count = 0
+    result1 = data.each(&block)
+    result2 = count
+
+    count = 0
+    assert_equal(result1, data.concurrent_each(0, &block))
+    assert_equal(result2, count)
+
+    count = 0
+    assert_equal(result1, data.concurrent_each(3, &block))
+    assert_equal(0, count) # count isn't shared among processes!
+
+    count = 0
+    assert_equal(result1, data.concurrent_each(-1, &block))
+    assert_equal(0, count) # count isn't shared among processes!
+  end
+
+  def test_pids
+    data = 1..10
+    block = lambda{$$}
+    result = data.collect(&block)
+
+    assert_equal(result, data.concurrent_collect(0, &block))
+    assert_not_equal(result, data.concurrent_collect(3, &block))
+    assert_not_equal(result, data.concurrent_collect(-1, &block))
+  end
+
+  def test_exceptions
+    data = 1..10
+    block = lambda{|n| raise ForkAndReturnEnumerableTestException if n == 2}
+
+    assert_raise(ForkAndReturnEnumerableTestException){data.concurrent_collect(0, &block)}
+    assert_raise(ForkAndReturnEnumerableTestException){data.concurrent_collect(3, &block)}
+    assert_raise(ForkAndReturnEnumerableTestException){data.concurrent_collect(-1, &block)}
+  end
+
+  def test_at_exit_handler
+    data = 1..10
+    block = lambda{}
+    file = "/tmp/FORK_AND_RETURN_TEST"
+
+    File.delete(file) if File.file?(file)
+
+    at_exit do
+      File.open(file, "w"){|f| f.write "some data"}
+    end
+
+    data.concurrent_collect(&block)
+
+    assert(! File.file?(file))
+  end
+
+  def test_clustered_concurrent_collect
+    data = 1..10
+    block = lambda{$$}
+    result = data.collect(&block)
+
+    assert_equal(3, data.clustered_concurrent_collect(3, &block).sort.uniq.length)
+
+    assert_equal(result, data.clustered_concurrent_collect(0, &block))
+    assert_not_equal(result, data.clustered_concurrent_collect(3, &block))
+    assert_not_equal(result, data.clustered_concurrent_collect(-1, &block))
+  end
+
+  def test_clustered_select
+    data = (1..10).zip(51..60)
+    block = lambda{|x, y| y%x==0}
+    result = data.select(&block)
+
+    assert_equal(result, data.clustered_concurrent_select(0, &block))
+    assert_equal(result, data.clustered_concurrent_select(3, &block))
+    assert_equal(result, data.clustered_concurrent_select(-1, &block))
+  end
+
+  def test_clustered_reject
+    data = (1..10).zip(51..60)
+    block = lambda{|x, y| y%x==0}
+    result = data.reject(&block)
+
+    assert_equal(result, data.clustered_concurrent_reject(0, &block))
+    assert_equal(result, data.clustered_concurrent_reject(3, &block))
+    assert_equal(result, data.clustered_concurrent_reject(-1, &block))
+  end
+
+  def test_clustered_each
+    data = (1..10).zip(51..60)
+    block = lambda{|x, y| y%x==0}
+    result = data.each(&block)
+
+    assert_equal(result, data.clustered_concurrent_each(0, &block))
+    assert_equal(result, data.clustered_concurrent_each(3, &block))
+    assert_equal(result, data.clustered_concurrent_each(-1, &block))
+  end
+end
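
The suite is plain Test::Unit, so it can presumably be run directly from an unpacked copy of the gem (the -I flag is an assumption based on the file layout listed above):

  ruby -Ilib test/test.rb
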
metadata
ADDED
@@ -0,0 +1,75 @@
+--- !ruby/object:Gem::Specification
+name: forkandreturn
+version: !ruby/object:Gem::Version
+  version: 0.1.0
+platform: ruby
+authors:
+- Erik Veenstra
+autorequire:
+bindir: bin
+cert_chain: []
+
+date: 2008-07-12 00:00:00 +02:00
+default_executable:
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: threadlimiter
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+description: Runs a block of code in a separate process and collects the result later. Includes a lot of convenient methods on Enumerable.
+email: forkandreturn@erikveen.dds.nl
+executables: []
+
+extensions: []
+
+extra_rdoc_files: []
+
+files:
+- lib/forkandreturn
+- lib/forkandreturn/exceptions.rb
+- lib/forkandreturn/enumerable.rb
+- lib/forkandreturn/forkandreturn.rb
+- lib/forkandreturn/util.rb
+- lib/forkandreturn.rb
+- README
+- LICENSE
+- VERSION
+has_rdoc: true
+homepage: http://www.erikveen.dds.nl/forkandreturn/index.html
+post_install_message:
+rdoc_options:
+- README
+- LICENSE
+- VERSION
+- --title
+- forkandreturn (0.1.0)
+- --main
+- README
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+requirements: []
+
+rubyforge_project: forkandreturn
+rubygems_version: 1.1.1
+signing_key:
+specification_version: 2
+summary: Runs a block of code in a separate process and collects the result later. Includes a lot of convenient methods on Enumerable.
+test_files:
+- test/test.rb
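
Based on the metadata above, installing the gem should pull in the threadlimiter dependency automatically; a minimal sketch for a RubyGems setup of that era:

  gem install forkandreturn
  ruby -rubygems -e 'require "forkandreturn"; p((1..4).concurrent_collect { |n| 2 * n })'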