comp_tree 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README +153 -0
- data/Rakefile +152 -0
- data/comp_tree.gemspec +38 -0
- data/contrib/quix/Rakefile +16 -0
- data/contrib/quix/install.rb +3 -0
- data/contrib/quix/lib/quix/builtin/dir/casefold_brackets.rb +7 -0
- data/contrib/quix/lib/quix/builtin/kernel/tap.rb +9 -0
- data/contrib/quix/lib/quix/builtin/module/include.rb +21 -0
- data/contrib/quix/lib/quix/builtin/module/private.rb +41 -0
- data/contrib/quix/lib/quix/config.rb +37 -0
- data/contrib/quix/lib/quix/cygwin.rb +60 -0
- data/contrib/quix/lib/quix/diagnostic.rb +44 -0
- data/contrib/quix/lib/quix/enumerable.rb +33 -0
- data/contrib/quix/lib/quix/fileutils.rb +37 -0
- data/contrib/quix/lib/quix/hash_struct.rb +27 -0
- data/contrib/quix/lib/quix/kernel.rb +61 -0
- data/contrib/quix/lib/quix/lazy_struct.rb +55 -0
- data/contrib/quix/lib/quix/simple_installer.rb +87 -0
- data/contrib/quix/lib/quix/string.rb +38 -0
- data/contrib/quix/lib/quix/subpackager.rb +52 -0
- data/contrib/quix/lib/quix/thread_local.rb +32 -0
- data/contrib/quix/lib/quix/vars.rb +138 -0
- data/contrib/quix/lib/quix.rb +32 -0
- data/contrib/quix/test/all.rb +12 -0
- data/contrib/quix/test/test_deps.rb +25 -0
- data/contrib/quix/test/test_include.rb +47 -0
- data/contrib/quix/test/test_private.rb +86 -0
- data/contrib/quix/test/test_root.rb +19 -0
- data/contrib/quix/test/test_struct.rb +48 -0
- data/contrib/quix/test/test_vars.rb +187 -0
- data/install.rb +3 -0
- data/lib/comp_tree/algorithm.rb +210 -0
- data/lib/comp_tree/bucket_ipc.rb +151 -0
- data/lib/comp_tree/driver.rb +267 -0
- data/lib/comp_tree/error.rb +27 -0
- data/lib/comp_tree/node.rb +165 -0
- data/lib/comp_tree/quix/builtin/kernel/tap.rb +33 -0
- data/lib/comp_tree/quix/diagnostic.rb +68 -0
- data/lib/comp_tree/quix/kernel.rb +85 -0
- data/lib/comp_tree/retriable_fork.rb +42 -0
- data/lib/comp_tree/task_node.rb +22 -0
- data/lib/comp_tree.rb +23 -0
- data/test/all.rb +12 -0
- data/test/test_bucketipc.rb +72 -0
- data/test/test_circular.rb +36 -0
- data/test/test_comp_tree.rb +364 -0
- data/test/test_exception.rb +97 -0
- metadata +120 -0
$LOAD_PATH.unshift "#{File.dirname(__FILE__)}/../lib"

require 'test/unit'
require 'quix/lazy_struct'
require 'quix/hash_struct'

# Tests for Quix::LazyStruct and Quix::LazyStruct::Mixin: struct-like
# objects whose attributes may be defined lazily by a block that is
# evaluated at most once (verified below via the counter +n+).
class TestLazyStruct < Test::Unit::TestCase
  # Shared assertions: plain assignment works, then a lazy attribute
  # block overrides the value and runs exactly once despite three reads.
  def common(s)
    s.f = 33
    assert_equal(33, s.f)

    n = 0
    s.attribute(:f) do
      n += 1
      44
    end

    3.times do
      assert_equal(44, s.f)
    end
    assert_equal(1, n)
  end

  def test_1
    common(Quix::LazyStruct.new)
  end

  def test_2
    # Mix the lazy behavior into the singleton class of an OpenStruct.
    s = OpenStruct.new
    class << s
      include Quix::LazyStruct::Mixin
    end
    common(s)
  end

  def test_3
    # Same mixin on a HashStruct; hash-style access must keep working
    # alongside the lazily-defined attribute.
    s = Quix::HashStruct.new
    class << s
      include Quix::LazyStruct::Mixin
    end
    common(s)
    s[:g] = 55
    actual = s.keys.each_with_object(Hash.new) { |e, acc| acc[e] = s[e] }
    assert_equal({ :f => 33, :g => 55 }, actual)
  end
end
$LOAD_PATH.unshift "#{File.dirname(__FILE__)}/../lib"

require 'test/unit'
require 'quix/vars'
require 'quix/hash_struct'

# Tests for Quix::Vars: helpers that shuttle values between local
# variables, hashes, and instance variables via the caller's binding,
# plus Quix::HashStruct.recursive_new.
#
# NOTE: the local variable names in these tests are semantically
# significant -- several helpers receive a string naming the locals to
# capture (e.g. %{a, b, c}) and evaluate it in this binding.
class TestVars < Test::Unit::TestCase
  include Quix::Vars

  def test_locals_to_hash
    a = 33
    b = Object.new
    c = lambda { a + 11 }

    # The block yields a string listing the locals to capture.
    hash = locals_to_hash {%{a, b, c}}

    # Object identity must be preserved, not merely equality.
    assert_equal(a.object_id, hash[:a].object_id)
    assert_equal(b.object_id, hash[:b].object_id)
    assert_equal(c.object_id, hash[:c].object_id)

    assert_equal(hash[:c].call, 44)
  end

  def test_hash_to_locals
    a = nil
    b = nil
    c = nil

    hash = {
      :a => 33,
      :b => Object.new,
      :c => lambda { hash[:a] + 11 },
    }

    # Assigns each hash entry to the same-named local in this binding.
    hash_to_locals { hash }

    assert_equal(a.object_id, hash[:a].object_id)
    assert_equal(b.object_id, hash[:b].object_id)
    assert_equal(c.object_id, hash[:c].object_id)

    assert_equal(hash[:c].call, 44)
    # A nil-yielding block is a no-op rather than an error.
    assert_nothing_raised { hash_to_locals { nil } }
  end

  def test_with_readers
    hash = {
      :a => 33,
      :b => Object.new,
      :c => lambda { hash[:a] + 11 },
    }

    # Readers do not exist before the with_readers block...
    assert_raise(NameError) { a }
    assert_raise(NameError) { b }
    assert_raise(NameError) { c }

    with_readers(hash) do
      assert_equal(a.object_id, hash[:a].object_id)
      assert_equal(b.object_id, hash[:b].object_id)
      assert_equal(c.object_id, hash[:c].object_id)
    end

    # ...and are gone again afterwards.
    assert_raise(NameError) { a }
    assert_raise(NameError) { b }
    assert_raise(NameError) { c }

    # Restricting to a subset of keys leaves the rest undefined.
    with_readers(hash, :a, :b) do
      assert_equal(a.object_id, hash[:a].object_id)
      assert_equal(b.object_id, hash[:b].object_id)
      assert_raise(NameError) { c }
    end
  end

  def test_locals_to_ivs
    a = 33
    b = Object.new
    c = lambda { a + 11 }

    assert(!defined?(@a))
    assert(!defined?(@b))
    assert(!defined?(@c))

    # Copies the named locals into @-prefixed instance variables.
    locals_to_ivs {%{a, b, c}}

    assert_equal(a.object_id, @a.object_id)
    assert_equal(b.object_id, @b.object_id)
    assert_equal(c.object_id, @c.object_id)

    assert_equal(@c.call, 44)
  end

  def test_hash_to_ivs
    hash = {
      :d => 33,
      :e => Object.new,
      :f => lambda { hash[:d] + 11 },
    }

    assert(!defined?(@d))
    assert(!defined?(@e))
    assert(!defined?(@f))

    # Copies each hash entry into the same-named instance variable.
    hash_to_ivs { hash }

    assert_equal(hash[:d].object_id, @d.object_id)
    assert_equal(hash[:e].object_id, @e.object_id)
    assert_equal(hash[:f].object_id, @f.object_id)

    assert_equal(hash[:f].call, 44)
    assert_nothing_raised { hash_to_ivs { nil } }
  end

  def test_config_to_hash
    config = %q{
      a = 33
      b = a + 11
      c = 5*(a - 22)
      d = (1..3).map { |n| n*n }
      e = "moo"
      f = lambda { a + 66 }

      a_object_id = a.object_id
      b_object_id = b.object_id
      c_object_id = c.object_id
      d_object_id = d.object_id
      e_object_id = e.object_id
      f_object_id = f.object_id
    }

    hash = config_to_hash(config)

    assert_equal(hash[:a], 33)
    assert_equal(hash[:b], 44)
    assert_equal(hash[:c], 55)
    assert_equal(hash[:d], [1, 4, 9])
    assert_equal(hash[:e], "moo")
    assert_equal(hash[:f].call, 99)

    # Each captured value is the very object the config code created.
    assert_equal(hash[:a].object_id, hash[:a_object_id])
    assert_equal(hash[:b].object_id, hash[:b_object_id])
    assert_equal(hash[:c].object_id, hash[:c_object_id])
    assert_equal(hash[:d].object_id, hash[:d_object_id])
    assert_equal(hash[:e].object_id, hash[:e_object_id])
    assert_equal(hash[:f].object_id, hash[:f_object_id])
  end

  def test_hash_struct
    hash = {
      :a => {
        :b => :c,
        :d => :e,
        :f => {
          :g => :h,
          :i => :j,
        },
      },
      :k => :l,
      :m => [ :n, :o, :p ],
      :q => {
        :r => {},
        :s => [],
      },
      :t => [
        {
          :u => :v,
          :w => :x,
        },
      ],
      :w => {
        :x => {
          :y => :z,
        },
      },
    }

    # recursive_new converts nested hashes to structs, but leaves
    # arrays (and hashes inside arrays) untouched.
    s = Quix::HashStruct.recursive_new(hash)
    assert_equal(s.a.b, :c)
    assert_equal(s.a.d, :e)
    assert_equal(s.a.f.g, :h)
    assert_equal(s.a.f.i, :j)
    assert_equal(s.k, :l)
    assert_equal(s.m, [:n, :o, :p])
    assert_equal(s.q.r, OpenStruct.new)
    assert_equal(s.q.s, [])
    assert_equal(s.t, [{ :u => :v, :w => :x }])
    assert_equal(s.w.x.y, :z)
  end
end
data/lib/comp_tree/algorithm.rb
ADDED

require 'comp_tree/quix/diagnostic'
require 'comp_tree/retriable_fork'

module CompTree
  #
  # Multithreaded evaluation of a computation tree.
  #
  module Algorithm
    include Quix::Diagnostic

    # Compute the tree rooted at +root+ using +num_threads+ worker
    # threads.  When +use_fork+ is true each node's computation runs in
    # a forked child process; +buckets+, when given, supplies one IPC
    # bucket per thread index for shipping results out of the fork.
    # Returns the root node's result.
    def compute_multithreaded(root, num_threads, use_fork, buckets)
      trace "Computing #{root.name} with #{num_threads} threads"
      result = nil
      mutex = Mutex.new
      node_finished_condition = ConditionVariable.new
      thread_wake_condition = ConditionVariable.new
      threads = []

      # workaround: jruby gives "run" status for waiting on
      # condition variable
      num_threads_ready = 0

      num_threads.times do |thread_index|
        threads << Thread.new do
          # Block until the main thread broadcasts the start signal.
          mutex.synchronize do
            trace "Thread #{thread_index} waiting to start"
            num_threads_ready += 1
            thread_wake_condition.wait(mutex)
          end

          loop do
            trace "Thread #{thread_index} node search"

            # A non-nil final result means we are done; this thread exits.
            break if mutex.synchronize { result }

            # Lock the tree and find a node.  The node we obtain,
            # if any, is already locked.
            node = mutex.synchronize { find_node(root) }

            if node
              trace "Thread #{thread_index} found node #{node.name}"

              node_result =
                compute_node(
                  node,
                  use_fork,
                  buckets ? buckets[thread_index] : nil)

              mutex.synchronize do
                node.result = node_result
              end

              # remove locks for this node (shared lock and own lock)
              mutex.synchronize do
                node.unlock
                if node == root
                  # Root node was computed; we are done.
                  trace "Thread #{thread_index} got final answer"
                  result = root.result
                end
                node_finished_condition.signal
              end
            else
              trace "Thread #{thread_index}: no node found; sleeping."
              mutex.synchronize do
                thread_wake_condition.wait(mutex)
              end
            end
          end
          trace "Thread #{thread_index} exiting"
        end
      end

      trace "Main: waiting for threads to launch and block."
      Thread.pass until mutex.synchronize { num_threads_ready == num_threads }

      trace "Main: entering main loop"
      mutex.synchronize do
        loop do
          trace "Main: waking threads"
          thread_wake_condition.broadcast

          if result
            trace "Main: detected finish."
            break
          end

          trace "Main: waiting for a node"
          node_finished_condition.wait(mutex)
          trace "Main: got a node"
        end
      end

      trace "Main: waiting for threads to finish."
      catch(:done) do
        loop do
          mutex.synchronize do
            # A thread's status is false once it has terminated normally.
            throw :done if threads.all? { |thread| thread.status == false }
            thread_wake_condition.broadcast
          end
          Thread.pass
        end
      end

      trace "Main: computation done."
      result
    end

    # Search the subtree under +node+ for a computable node; return it
    # already locked, or nil when nothing is available.  Only called
    # while holding the tree mutex.
    def find_node(node)
      trace "Looking for a node, starting with #{node.name}"
      if node.result
        # Already computed; nothing to do here.
        trace "#{node.name} has been computed"
        nil
      elsif node.children_results and node.try_lock
        # Node is not computed, its children are computed, and we hold
        # the lock: ready to compute.
        node
      else
        # Locked or children not computed; recurse into the children.
        trace "Checking #{node.name}'s children"
        node.each_child do |child|
          found = find_node(child)
          return found if found
        end
        nil
      end
    end

    # Compute a single node, optionally inside a forked child process.
    # With a +bucket+ the child's result is shipped back through it;
    # with fork but no bucket the result is discarded and +true+ is
    # returned; without fork the node is computed in-process.
    def compute_node(node, use_fork, bucket)
      unless use_fork
        #
        # No fork
        #
        node.trace_compute
        return node.compute
      end

      trace "About to fork for node #{node.name}"
      if bucket
        #
        # Use our assigned bucket to transfer the result.
        #
        fork_node(node) do
          node.trace_compute
          bucket.contents = node.compute
        end
        bucket.contents
      else
        #
        # No bucket -- discarding result
        #
        fork_node(node) do
          node.trace_compute
          node.compute
        end
        true
      end
    end

    # Fork a child process, run the given block inside it, and wait for
    # the child to finish.  Exits the parent when the child fails.
    def fork_node(node)
      trace "About to fork for node #{node.name}"
      process_id = RetriableFork.fork do
        trace "Fork: process #{Process.pid}"
        node.trace_compute
        yield
        trace "Fork: computation done"
      end
      trace "Waiting for process #{process_id}"
      Process.wait(process_id)
      trace "Process #{process_id} finished"
      exitstatus = $?.exitstatus
      if exitstatus != 0
        trace "Process #{process_id} returned #{exitstatus}; exiting."
        exit(1)
      end
    end

    extend self
  end
end

require 'drb'
require 'thread'
require 'tmpdir'

require 'comp_tree/retriable_fork'
require 'comp_tree/quix/diagnostic'
require 'comp_tree/quix/builtin/kernel/tap'

module CompTree
  #
  # Inter-process result transfer via DRb "buckets": each bucket is a
  # small forked DRb server holding a single +contents+ value.
  #
  module BucketIPC
    class Bucket
      include Quix::Diagnostic
      include RetriableFork

      # Fork a DRb server at +address+ exposing a one-slot container.
      # +timeout+ and +wait_interval+ (seconds) govern reconnection
      # retries in #connect.
      def initialize(address, timeout, wait_interval)
        trace "Making bucket with address #{address}"

        @remote_pid = fork {
          own_object = Class.new {
            attr_accessor(:contents)
          }.new
          server = DRb.start_service(address, own_object)
          debug {
            server.verbose = true
          }
          # Serve until the process is killed by #stop.
          DRb.thread.join
        }

        @remote_object = DRbObject.new_with_uri(address)
        @address = address
        @timeout = timeout
        @wait_interval = wait_interval
      end

      attr_accessor(:timeout, :wait_interval)
      attr_reader(:address)

      # Store +new_contents+ in the remote server.
      def contents=(new_contents)
        connect {
          @remote_object.contents = new_contents
        }
      end

      # Fetch the value currently held by the remote server.
      def contents
        connect {
          @remote_object.contents
        }
      end

      # Terminate the forked DRb server process.
      def stop
        Process.kill("TERM", @remote_pid)
      end

      private

      # Run the block, retrying on DRb connection errors (the server
      # may not have finished starting) until @timeout elapses.
      def connect
        begin
          return yield
        rescue DRb::DRbConnError
          start = Time.now
          begin
            Kernel.sleep(@wait_interval)
            return yield
          rescue DRb::DRbConnError
            if Time.now - start > @timeout
              raise
            end
            retry
          end
        end
      end
    end

    class DriverBase
      # Create one Bucket per address.  When a block is given, yield
      # the buckets and stop them all afterwards (even on error).
      def initialize(addresses, timeout, wait_interval)
        begin
          @buckets = addresses.map { |address|
            Bucket.new(address, timeout, wait_interval)
          }
          if block_given?
            yield @buckets
          end
        ensure
          if block_given?
            stop
          end
        end
      end

      # Stop every bucket created so far (safe if creation failed early).
      def stop
        if defined?(@buckets)
          @buckets.each { |bucket|
            bucket.stop
          }
        end
      end
    end

    class Driver < DriverBase
      DEFAULTS = {
        :timeout => 0.5,
        :wait_interval => 0.05,
        :port_start => 18181,
      }

      # Process-wide monotonically increasing counter used to make
      # bucket addresses unique across Driver instances.
      module BucketCounter
        @mutex = Mutex.new
        @count = 0
        class << self
          def increment_count
            @mutex.synchronize {
              @count += 1
            }
          end

          # Yield +num_buckets+ fresh indexes; collect the results.
          def map_indexes(num_buckets)
            Array.new.tap { |result|
              num_buckets.times {
                result << yield(increment_count)
              }
            }
          end
        end
      end

      # Build +num_buckets+ buckets.  Options (see DEFAULTS):
      # :timeout, :wait_interval, :port_start.
      def initialize(num_buckets, opts_in = {})
        opts = DEFAULTS.merge(opts_in)

        addresses =
          if RetriableFork::HAVE_FORK
            #
            # Assume the existence of fork implies a unix machine.
            # Dir.tmpdir requires the 'tmpdir' stdlib (required above).
            #
            require 'drb/unix'
            basename = "drbunix://#{Dir.tmpdir}/bucket.#{Process.pid}.#{rand}"
            BucketCounter.map_indexes(num_buckets) { |index|
              "#{basename}.#{index}"
            }
          else
            #
            # Fallback: use the default socket.
            #
            BucketCounter.map_indexes(num_buckets) { |index|
              "druby://localhost:#{opts[:port_start] + index}"
            }
          end
        super(addresses, opts[:timeout], opts[:wait_interval])
      end
    end
  end
end