vcap-concurrency 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +18 -0
- data/Gemfile +3 -0
- data/LICENSE +7136 -0
- data/README.md +3 -0
- data/Rakefile +15 -0
- data/lib/vcap/concurrency.rb +4 -0
- data/lib/vcap/concurrency/atomic_var.rb +68 -0
- data/lib/vcap/concurrency/errors.rb +7 -0
- data/lib/vcap/concurrency/promise.rb +97 -0
- data/lib/vcap/concurrency/proxy.rb +18 -0
- data/lib/vcap/concurrency/thread_pool.rb +152 -0
- data/lib/vcap/concurrency/version.rb +5 -0
- data/spec/atomic_var_spec.rb +56 -0
- data/spec/promise_spec.rb +83 -0
- data/spec/proxy_spec.rb +13 -0
- data/spec/spec_helper.rb +3 -0
- data/spec/thread_pool_spec.rb +175 -0
- data/vcap-concurrency.gemspec +22 -0
- metadata +116 -0
data/README.md
ADDED
data/Rakefile
ADDED
@@ -0,0 +1,15 @@
|
|
1
|
+
#!/usr/bin/env rake
|
2
|
+
require "bundler/gem_tasks"
|
3
|
+
require "ci/reporter/rake/rspec"
|
4
|
+
require "rspec/core/rake_task"
|
5
|
+
|
6
|
+
desc "Run all specs"
|
7
|
+
RSpec::Core::RakeTask.new("spec") do |t|
|
8
|
+
t.rspec_opts = %w[--color --format documentation]
|
9
|
+
end
|
10
|
+
|
11
|
+
desc "Run all specs and provide output for ci"
|
12
|
+
RSpec::Core::RakeTask.new("spec:ci" => "ci:setup:rspec") do |t|
|
13
|
+
t.rspec_opts = %w[--no-color --format documentation]
|
14
|
+
end
|
15
|
+
|
@@ -0,0 +1,68 @@
|
|
1
|
+
require "thread"

module VCAP
  module Concurrency
  end
end

# A variable whose reads and writes are serialized through a single mutex.
# Waiters may also block until the bound value changes.
class VCAP::Concurrency::AtomicVar
  def initialize(initial_value = nil)
    @value = initial_value
    @lock  = Mutex.new
    @cond  = ConditionVariable.new
  end

  # @return [Object] The value bound to this variable.
  def value
    @lock.synchronize { @value }
  end

  # Blocks the calling thread until the current value is different from the
  # supplied value.
  #
  # @param [Object] last_value This method will return once the current
  #        value no longer equals last_value.
  #
  # @return [Object] The new value
  def wait_value_changed(last_value)
    loop do
      @lock.synchronize do
        # Re-check after every wakeup; broadcast in #mutate wakes us.
        return @value unless last_value == @value
        @cond.wait(@lock)
      end
    end
  end

  def value=(new_value)
    mutate { |_current| new_value }
  end

  # Allows the caller to atomically mutate the current value. The new value
  # will be whatever the supplied block evaluates to. All threads blocked in
  # #wait_value_changed are woken afterwards.
  #
  # @param [Block] blk Executed while the lock is held; receives the current
  #        value as its only argument.
  #
  # @return [Object] The result of the block (also the new value bound to
  #         the var).
  def mutate(&blk)
    @lock.synchronize do
      @value = blk.call(@value)
      @cond.broadcast
      @value
    end
  end
end
|
@@ -0,0 +1,97 @@
|
|
1
|
+
require "thread"
|
2
|
+
|
3
|
+
require "vcap/concurrency/errors"
|
4
|
+
|
5
|
+
module VCAP
  module Concurrency
  end
end

# A promise represents the intent to complete a unit of work at some point
# in the future.
class VCAP::Concurrency::Promise
  def initialize
    @lock   = Mutex.new
    @cond   = ConditionVariable.new
    @done   = false
    @result = nil
    @error  = nil
  end

  # Fulfills the promise successfully. Anyone blocking on the result will be
  # notified immediately.
  #
  # @param [Object] result The result of the associated computation.
  #
  # @raise [RuntimeError] If the promise has already been completed.
  #
  # @return [nil]
  def deliver(result = nil)
    @lock.synchronize do
      assert_not_done

      @result = result
      @done = true

      @cond.broadcast
    end

    nil
  end

  # Fulfills the promise unsuccessfully. Anyone blocking on the result will
  # be notified immediately.
  #
  # NB: The supplied exception will be re raised in the caller of #resolve().
  #
  # @param [Exception] exception The error that occurred while fulfilling the
  #        promise.
  #
  # @raise [RuntimeError] If the promise has already been completed.
  #
  # @return [nil]
  def fail(exception)
    @lock.synchronize do
      assert_not_done

      @error = exception
      @done = true

      @cond.broadcast
    end

    nil
  end

  # Waits for the promise to be fulfilled. Blocks the calling thread if the
  # promise has not been fulfilled, otherwise it returns immediately.
  #
  # NB: If the promise failed to be fulfilled, the error that occurred while
  # fulfilling it will be raised here.
  #
  # @param [Integer] timeout_secs If supplied, wait for no longer than this
  # value before proceeding. An exception will be raised if the promise hasn't
  # been fulfilled when the timeout occurs.
  #
  # @raise [VCAP::Concurrency::TimeoutError] Raised if the promise hasn't been
  # fulfilled after +timeout_secs+ seconds since calling resolve().
  #
  # @return [Object] The result of the associated computation.
  def resolve(timeout_secs = nil)
    @lock.synchronize do
      # Wait in a predicate loop: ConditionVariable#wait may return before
      # the promise is completed (spurious wakeup, racing broadcast), so a
      # single wait could incorrectly report a timeout — even when no
      # timeout was requested at all.
      if timeout_secs.nil?
        @cond.wait(@lock) until @done
      else
        # Track a monotonic deadline so repeated waits never exceed the
        # caller's total budget.
        deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + timeout_secs
        until @done
          remaining = deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
          break if remaining <= 0
          @cond.wait(@lock, remaining)
        end
      end

      unless @done
        emsg = "Timed out waiting on result after #{timeout_secs}s."
        raise VCAP::Concurrency::TimeoutError, emsg
      end

      if @error
        raise @error
      else
        @result
      end
    end
  end

  private

  # Guards against completing (delivering or failing) a promise twice.
  def assert_not_done
    raise "A promise may only be completed once." if @done
  end
end
|
@@ -0,0 +1,18 @@
|
|
1
|
+
require "thread"

module VCAP
  module Concurrency
  end
end

# A coarse grained thread-safe proxy object. Every message sent to the proxy
# is forwarded to the wrapped object while a single mutex is held, so at most
# one thread interacts with the wrapped object at a time.
class VCAP::Concurrency::Proxy
  # @param [Object] orig The object to wrap; all messages are forwarded to it.
  def initialize(orig)
    @orig = orig
    @lock = Mutex.new
  end

  # Forwards any unknown message (with args and block) to the wrapped object
  # under the lock.
  def method_missing(meth, *args, &blk)
    @lock.synchronize { @orig.send(meth, *args, &blk) }
  end

  # Keeps respond_to? consistent with the messages method_missing forwards;
  # without this the proxy would claim not to respond to forwarded methods.
  def respond_to_missing?(meth, include_private = false)
    @orig.respond_to?(meth, include_private) || super
  end
end
|
@@ -0,0 +1,152 @@
|
|
1
|
+
require "thread"
|
2
|
+
|
3
|
+
require "vcap/concurrency/atomic_var"
|
4
|
+
require "vcap/concurrency/promise"
|
5
|
+
|
6
|
+
module VCAP
  module Concurrency
  end
end

# A fixed-size pool of worker threads that pull tasks off a shared queue.
# Each enqueued task is paired with a Promise through which the caller can
# retrieve the task's result (or its error).
class VCAP::Concurrency::ThreadPool
  # Queued in place of a task to tell a worker to exit its loop.
  STOP_SENTINEL = :stop

  STATE_CREATED = 0
  STATE_STARTED = 1
  STATE_STOPPED = 2

  def initialize(num_threads)
    @num_threads      = num_threads
    @threads          = []
    @work_queue       = Queue.new
    @state            = STATE_CREATED
    @pool_lock        = Mutex.new
    @num_active_tasks = VCAP::Concurrency::AtomicVar.new(0)
  end

  # Creates all threads in the pool and starts them. Tasks that were enqueued
  # prior to starting the pool will be processed immediately.
  def start
    @pool_lock.synchronize do
      assert_state_in(STATE_CREATED)

      @num_threads.times { @threads << create_worker_thread }

      @state = STATE_STARTED
    end

    nil
  end

  # Adds a block that will be executed by a worker thread.
  #
  # @param [Block] blk The block to be executed by a worker thread.
  #
  # @return [VCAP::Concurrency::Promise] The caller of enqueue() may wait for
  #         the result of blk by calling resolve()
  def enqueue(&blk)
    @pool_lock.synchronize do
      assert_state_in(STATE_CREATED, STATE_STARTED)

      VCAP::Concurrency::Promise.new.tap do |promise|
        @work_queue.enq([blk, promise])
      end
    end
  end

  # Stops the thread pool politely, allowing existing work to be completed.
  def stop
    @pool_lock.synchronize do
      # One sentinel per worker so every worker sees exactly one.
      @num_threads.times { @work_queue.enq(STOP_SENTINEL) }

      @state = STATE_STOPPED
    end

    nil
  end

  # Waits for all worker threads to finish executing.
  def join
    @pool_lock.synchronize do
      assert_state_in(STATE_STARTED, STATE_STOPPED)
    end

    @threads.each(&:join)

    nil
  end

  # Queues up sentinel values to notify workers to stop, then waits for them
  # to finish.
  def shutdown
    stop
    join

    nil
  end

  # Returns the number of tasks that are currently running. This is equivalent
  # to the number of active threads.
  #
  # @return [Integer]
  def num_active_tasks
    @num_active_tasks.value
  end

  # Returns the number of tasks waiting to be processed
  #
  # NB: While technically correct, this will include the number of unprocessed
  # sentinel tasks after stop() is called.
  #
  # @return [Integer]
  def num_queued_tasks
    @work_queue.length
  end

  private

  # Worker loop: dequeue tasks and fulfill their promises until a sentinel
  # arrives.
  def do_work
    loop do
      item = @work_queue.deq
      break if item == STOP_SENTINEL

      task, promise = item

      @num_active_tasks.mutate { |count| count + 1 }

      outcome = begin
        [:deliver, task.call]
      rescue => e
        [:fail, e]
      end

      # This is intentionally outside of the begin/rescue block above. Errors
      # here are bugs in our code, and shouldn't be propagated back to
      # whomever enqueued the task.
      promise.send(*outcome)

      @num_active_tasks.mutate { |count| count - 1 }
    end

    nil
  end

  def create_worker_thread
    Thread.new { do_work }.tap { |t| t.abort_on_exception = true }
  end

  def assert_state_in(*states)
    raise "Invalid state" unless states.include?(@state)
  end
end
|
@@ -0,0 +1,56 @@
|
|
1
|
+
require "spec_helper"

describe VCAP::Concurrency::AtomicVar do
  describe "#value" do
    it "should return the current value" do
      initial = 5
      var = VCAP::Concurrency::AtomicVar.new(initial)
      var.value.should == initial
    end
  end

  describe "#value=" do
    it "should allow the current value to be changed" do
      var = VCAP::Concurrency::AtomicVar.new(1)
      updated = 2
      var.value = updated
      var.value.should == updated
    end
  end

  describe "#mutate" do
    it "should update the value to the result of the supplied block" do
      initial = 2
      var = VCAP::Concurrency::AtomicVar.new(initial)
      var.mutate { |v| v * v }
      var.value.should == (initial * initial)
    end
  end

  describe "#wait_value_changed" do
    it "should return immediately if the current value differs from the supplied value" do
      initial = 1
      var = VCAP::Concurrency::AtomicVar.new(initial)
      var.wait_value_changed(2).should == initial
    end

    it "should block if the current value is the same" do
      barrier = VCAP::Concurrency::AtomicVar.new(0)

      # We're using the atomic var as a form of synchronization here.
      helper = Thread.new do
        barrier.wait_value_changed(0)

        barrier.mutate { |v| v + 1 }
      end

      observed = barrier.mutate { |v| v + 1 }

      barrier.wait_value_changed(observed)

      helper.join

      barrier.value.should == 2
    end
  end
end
|
@@ -0,0 +1,83 @@
|
|
1
|
+
require "spec_helper"

describe VCAP::Concurrency::Promise do
  let(:promise) { VCAP::Concurrency::Promise.new }

  describe "#deliver " do
    it "should deliver the supplied result to callers of resolve" do
      promise.deliver(:done)
      promise.resolve.should == :done
    end

    it "should raise an error if called more than once" do
      promise.deliver
      expect do
        promise.deliver
      end.to raise_error(/completed once/)
    end

    it "should wake up all threads that are resolving it" do
      lock = Mutex.new
      cond = ConditionVariable.new
      waiting = 0
      threads = Array.new(5) do
        Thread.new do
          lock.synchronize do
            waiting += 1
            cond.signal
          end
          promise.resolve
        end
      end

      # Block until every thread has checked in before delivering.
      lock.synchronize do
        cond.wait(lock) until waiting == threads.length
      end

      promise.deliver

      # join returns nil if timeout occurred and the thread hadn't finished
      threads.each { |t| t.join(1).should == t }
    end
  end

  describe "#fail" do
    it "should deliver the supplied exception to callers of resolve" do
      error_text = "test error"
      promise.fail(RuntimeError.new(error_text))
      expect do
        promise.resolve
      end.to raise_error(/#{error_text}/)
    end

    it "should raise an error if called more than once" do
      error = RuntimeError.new("test")
      promise.fail(error)
      expect do
        promise.fail(error)
      end.to raise_error(/completed once/)
    end
  end

  describe "#resolve" do
    it "should raise an error when a timeout occurs" do
      started_at = Time.now

      expect do
        promise.resolve(0.5)
      end.to raise_error(VCAP::Concurrency::TimeoutError)

      elapsed_secs = Time.now - started_at

      elapsed_secs.should be_within(1).of(0.5)
    end
  end
end
|