concurrent-ruby 0.5.0 → 0.6.0.pre.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +88 -77
- data/lib/concurrent.rb +17 -2
- data/lib/concurrent/actor.rb +17 -0
- data/lib/concurrent/actor_context.rb +31 -0
- data/lib/concurrent/actor_ref.rb +39 -0
- data/lib/concurrent/agent.rb +12 -3
- data/lib/concurrent/async.rb +290 -0
- data/lib/concurrent/atomic.rb +5 -9
- data/lib/concurrent/cached_thread_pool.rb +39 -137
- data/lib/concurrent/channel/blocking_ring_buffer.rb +60 -0
- data/lib/concurrent/channel/buffered_channel.rb +83 -0
- data/lib/concurrent/channel/channel.rb +11 -0
- data/lib/concurrent/channel/probe.rb +19 -0
- data/lib/concurrent/channel/ring_buffer.rb +54 -0
- data/lib/concurrent/channel/unbuffered_channel.rb +34 -0
- data/lib/concurrent/channel/waitable_list.rb +38 -0
- data/lib/concurrent/configuration.rb +92 -0
- data/lib/concurrent/dataflow.rb +9 -3
- data/lib/concurrent/delay.rb +88 -0
- data/lib/concurrent/exchanger.rb +31 -0
- data/lib/concurrent/fixed_thread_pool.rb +28 -122
- data/lib/concurrent/future.rb +10 -5
- data/lib/concurrent/immediate_executor.rb +3 -2
- data/lib/concurrent/ivar.rb +2 -1
- data/lib/concurrent/java_cached_thread_pool.rb +45 -0
- data/lib/concurrent/java_fixed_thread_pool.rb +37 -0
- data/lib/concurrent/java_thread_pool_executor.rb +194 -0
- data/lib/concurrent/per_thread_executor.rb +23 -0
- data/lib/concurrent/postable.rb +2 -0
- data/lib/concurrent/processor_count.rb +125 -0
- data/lib/concurrent/promise.rb +42 -18
- data/lib/concurrent/ruby_cached_thread_pool.rb +37 -0
- data/lib/concurrent/ruby_fixed_thread_pool.rb +31 -0
- data/lib/concurrent/ruby_thread_pool_executor.rb +268 -0
- data/lib/concurrent/ruby_thread_pool_worker.rb +69 -0
- data/lib/concurrent/simple_actor_ref.rb +124 -0
- data/lib/concurrent/thread_local_var.rb +1 -1
- data/lib/concurrent/thread_pool_executor.rb +30 -0
- data/lib/concurrent/timer_task.rb +13 -10
- data/lib/concurrent/tvar.rb +212 -0
- data/lib/concurrent/utilities.rb +1 -0
- data/lib/concurrent/version.rb +1 -1
- data/spec/concurrent/actor_context_spec.rb +37 -0
- data/spec/concurrent/actor_ref_shared.rb +313 -0
- data/spec/concurrent/actor_spec.rb +9 -1
- data/spec/concurrent/agent_spec.rb +97 -96
- data/spec/concurrent/async_spec.rb +320 -0
- data/spec/concurrent/cached_thread_pool_shared.rb +137 -0
- data/spec/concurrent/channel/blocking_ring_buffer_spec.rb +149 -0
- data/spec/concurrent/channel/buffered_channel_spec.rb +151 -0
- data/spec/concurrent/channel/channel_spec.rb +37 -0
- data/spec/concurrent/channel/probe_spec.rb +49 -0
- data/spec/concurrent/channel/ring_buffer_spec.rb +126 -0
- data/spec/concurrent/channel/unbuffered_channel_spec.rb +132 -0
- data/spec/concurrent/configuration_spec.rb +134 -0
- data/spec/concurrent/dataflow_spec.rb +109 -27
- data/spec/concurrent/delay_spec.rb +77 -0
- data/spec/concurrent/exchanger_spec.rb +66 -0
- data/spec/concurrent/fixed_thread_pool_shared.rb +136 -0
- data/spec/concurrent/future_spec.rb +60 -51
- data/spec/concurrent/global_thread_pool_shared.rb +33 -0
- data/spec/concurrent/immediate_executor_spec.rb +4 -25
- data/spec/concurrent/ivar_spec.rb +36 -23
- data/spec/concurrent/java_cached_thread_pool_spec.rb +64 -0
- data/spec/concurrent/java_fixed_thread_pool_spec.rb +64 -0
- data/spec/concurrent/java_thread_pool_executor_spec.rb +71 -0
- data/spec/concurrent/obligation_shared.rb +32 -20
- data/spec/concurrent/{global_thread_pool_spec.rb → per_thread_executor_spec.rb} +9 -13
- data/spec/concurrent/processor_count_spec.rb +20 -0
- data/spec/concurrent/promise_spec.rb +29 -41
- data/spec/concurrent/ruby_cached_thread_pool_spec.rb +69 -0
- data/spec/concurrent/ruby_fixed_thread_pool_spec.rb +39 -0
- data/spec/concurrent/ruby_thread_pool_executor_spec.rb +183 -0
- data/spec/concurrent/simple_actor_ref_spec.rb +219 -0
- data/spec/concurrent/thread_pool_class_cast_spec.rb +40 -0
- data/spec/concurrent/thread_pool_executor_shared.rb +155 -0
- data/spec/concurrent/thread_pool_shared.rb +98 -36
- data/spec/concurrent/tvar_spec.rb +137 -0
- data/spec/spec_helper.rb +4 -0
- data/spec/support/functions.rb +4 -0
- metadata +85 -20
- data/lib/concurrent/cached_thread_pool/worker.rb +0 -91
- data/lib/concurrent/channel.rb +0 -63
- data/lib/concurrent/fixed_thread_pool/worker.rb +0 -54
- data/lib/concurrent/global_thread_pool.rb +0 -42
- data/spec/concurrent/cached_thread_pool_spec.rb +0 -101
- data/spec/concurrent/channel_spec.rb +0 -86
- data/spec/concurrent/fixed_thread_pool_spec.rb +0 -92
- data/spec/concurrent/uses_global_thread_pool_shared.rb +0 -64
module Concurrent

  # A thread-safe, fixed-capacity FIFO buffer. Wraps the non-thread-safe
  # +RingBuffer+ with a mutex and a condition variable so that +put+ blocks
  # while the buffer is full and +take+ blocks while it is empty.
  #
  # Fix: removed the dead +@first+, +@last+ and +@count+ instance variables.
  # They were copied from +RingBuffer+ but never read or updated here — all
  # indexing and counting is delegated to the wrapped +@buffer+.
  class BlockingRingBuffer

    # @param [Integer] capacity the maximum number of elements the buffer can hold
    def initialize(capacity)
      @buffer = RingBuffer.new(capacity)
      @mutex = Mutex.new
      @condition = Condition.new
    end

    # @return [Integer] the maximum capacity of the buffer
    def capacity
      @mutex.synchronize { @buffer.capacity }
    end

    # @return [Integer] the number of elements currently in the buffer
    def count
      @mutex.synchronize { @buffer.count }
    end

    # @return [Boolean] true when the buffer is at capacity
    def full?
      @mutex.synchronize { @buffer.full? }
    end

    # @return [Boolean] true when the buffer holds no elements
    def empty?
      @mutex.synchronize { @buffer.empty? }
    end

    # Append +value+, blocking while the buffer is full, then wake one waiter.
    #
    # @param [Object] value the item to append
    def put(value)
      @mutex.synchronize do
        wait_while_full
        @buffer.offer(value)
        @condition.signal
      end
    end

    # Remove and return the oldest value, blocking while the buffer is empty,
    # then wake one waiter.
    #
    # @return [Object] the oldest value in the buffer
    def take
      @mutex.synchronize do
        wait_while_empty
        result = @buffer.poll
        @condition.signal
        result
      end
    end

    # @return [Object] the oldest value without removing it (nil when empty)
    def peek
      @mutex.synchronize { @buffer.peek }
    end

    private

    # Block on the condition until space is available.
    # Must be called while holding +@mutex+.
    def wait_while_full
      @condition.wait(@mutex) while @buffer.full?
    end

    # Block on the condition until an element is available.
    # Must be called while holding +@mutex+.
    def wait_while_empty
      @condition.wait(@mutex) while @buffer.empty?
    end

  end
end
require_relative 'waitable_list'

module Concurrent
  # A channel backed by a fixed-size +RingBuffer+. Producers (+push+) fill the
  # buffer or hand values directly to waiting probes; consumers (+pop+/+select+)
  # either drain the buffer or park a probe in the +WaitableList+ until a value
  # arrives.
  class BufferedChannel

    # @param [Integer] size capacity of the internal ring buffer
    def initialize(size)
      @mutex = Mutex.new
      # NOTE(review): @condition appears unused in this class — only
      # @buffer_condition is waited on/broadcast below. Confirm before removing.
      @condition = Condition.new
      @buffer_condition = Condition.new

      @probe_set = WaitableList.new
      @buffer = RingBuffer.new(size)
    end

    # @return [Integer] number of probes currently waiting for a value
    def probe_set_size
      @probe_set.size
    end

    # @return [Integer] number of values currently buffered
    def buffer_queue_size
      @mutex.synchronize { @buffer.count }
    end

    # Deliver +value+ to a waiting probe, or buffer it. Retries until some
    # probe accepts the value or it lands in the buffer (a taken probe may
    # already have been assigned by another channel, in which case
    # +set_unless_assigned+ returns false and we loop again).
    def push(value)
      until set_probe_or_push_into_buffer(value)
      end
    end

    # Blocking receive: registers a fresh probe and waits for its value.
    def pop
      probe = Probe.new
      select(probe)
      probe.value
    end

    # Register +probe+ with this channel. If the buffer is empty the probe is
    # queued for a future +push+; otherwise the head of the buffer is offered
    # to the probe immediately (and consumed only if the probe accepted it).
    #
    # NOTE(review): returns true when the probe was queued, otherwise the
    # result of +shift_buffer+/nil — callers appear not to rely on the return
    # value; confirm.
    def select(probe)
      @mutex.synchronize do

        if @buffer.empty?
          @probe_set.put(probe)
          true
        else
          shift_buffer if probe.set_unless_assigned peek_buffer
        end

      end
    end

    # Withdraw a previously registered probe (e.g. after it was satisfied by
    # another channel in a multi-channel select).
    def remove_probe(probe)
      @probe_set.delete(probe)
    end

    private

    # Append +value+ to the buffer, waiting for space.
    # Must be called while holding +@mutex+ (the condition wait releases it).
    def push_into_buffer(value)
      @buffer_condition.wait(@mutex) while @buffer.full?
      @buffer.offer value
      @buffer_condition.broadcast
    end

    # Return (without removing) the head of the buffer, waiting for data.
    # Must be called while holding +@mutex+.
    def peek_buffer
      @buffer_condition.wait(@mutex) while @buffer.empty?
      @buffer.peek
    end

    # Remove and return the head of the buffer, waiting for data, then wake
    # all producers/consumers blocked on the buffer condition.
    # Must be called while holding +@mutex+.
    def shift_buffer
      @buffer_condition.wait(@mutex) while @buffer.empty?
      result = @buffer.poll
      @buffer_condition.broadcast
      result
    end

    # One push attempt: buffer the value when no probe is waiting, otherwise
    # offer it to the oldest waiting probe. Returns false when the taken probe
    # was already assigned, signalling the caller to retry.
    def set_probe_or_push_into_buffer(value)
      @mutex.synchronize do
        if @probe_set.empty?
          push_into_buffer(value)
          true
        else
          @probe_set.take.set_unless_assigned(value)
        end
      end
    end

  end
end
module Concurrent
  # A single-assignment variable used by channels to hand a value to a waiting
  # consumer. Extends +IVar+ with an atomic "set only if still pending" operation.
  class Probe < IVar

    # @param [Object] value the initial value (defaults to unset)
    # @param [Hash] opts options forwarded to +IVar+
    def initialize(value = NO_VALUE, opts = {})
      super(value, opts)
    end

    # Atomically fulfill this probe with +value+ unless it has already been
    # fulfilled or rejected.
    #
    # @param [Object] value the value to assign
    # @return [Boolean] true when the assignment happened, false when the
    #   probe was already resolved
    def set_unless_assigned(value)
      mutex.synchronize do
        if @state == :fulfilled || @state == :rejected
          false
        else
          set_state(true, value, nil)
          event.set
          true
        end
      end
    end
  end
end
module Concurrent

  # A fixed-capacity circular FIFO buffer. Not thread safe: callers must
  # provide external synchronization (see +BlockingRingBuffer+).
  class RingBuffer

    # @param [Integer] capacity maximum number of elements the buffer can hold
    def initialize(capacity)
      @buffer = Array.new(capacity)
      @first = @last = 0
      @count = 0
    end

    # @return [Integer] the maximum number of elements the buffer can hold
    def capacity
      @buffer.size
    end

    # @return [Integer] the number of elements currently stored
    def count
      @count
    end

    # @return [Boolean] true when no elements are stored
    def empty?
      @count == 0
    end

    # @return [Boolean] true when the buffer is at capacity
    def full?
      @count == capacity
    end

    # @param [Object] value
    # @return [Boolean] true if value has been inserted, false otherwise
    def offer(value)
      return false if full?

      @buffer[@last] = value
      @last = (@last + 1) % @buffer.size
      @count += 1
      true
    end

    # @return [Object] the first available value and removes it from the buffer. If buffer is empty returns nil
    def poll
      # Fix: without this guard an empty poll decremented @count below zero,
      # corrupting count/empty?/full? for all subsequent operations.
      return nil if empty?

      result = @buffer[@first]
      @buffer[@first] = nil # release the reference so the object can be GC'd
      @first = (@first + 1) % @buffer.size
      @count -= 1
      result
    end

    # @return [Object] the first available value and without removing it from the buffer. If buffer is empty returns nil
    def peek
      @buffer[@first]
    end

  end
end
require_relative 'waitable_list'

module Concurrent
  # A rendezvous channel with no internal storage: every +push+ hands its
  # value directly to a waiting probe, blocking until a consumer arrives.
  class UnbufferedChannel

    def initialize
      @probe_set = WaitableList.new
    end

    # @return [Integer] number of probes currently waiting on this channel
    def probe_set_size
      @probe_set.size
    end

    # Deliver +value+ to a waiting consumer. Takes probes from the waiting
    # list (blocking when none are queued) until one accepts the value — a
    # taken probe may already have been assigned by another channel.
    def push(value)
      loop do
        break if @probe_set.take.set_unless_assigned(value)
      end
    end

    # Blocking receive: registers a fresh probe and waits for its value.
    def pop
      pending = Probe.new
      select(pending)
      pending.value
    end

    # Register +probe+ to receive the next pushed value.
    def select(probe)
      @probe_set.put(probe)
    end

    # Withdraw a previously registered probe (e.g. after it was satisfied by
    # another channel in a multi-channel select).
    def remove_probe(probe)
      @probe_set.delete(probe)
    end

  end
end
module Concurrent
  # A thread-safe FIFO collection whose +take+ blocks consumers until an
  # element becomes available.
  class WaitableList

    def initialize
      @mutex = Mutex.new
      @condition = Condition.new

      @items = []
    end

    # @return [Integer] the number of queued elements
    def size
      @mutex.synchronize { @items.size }
    end

    # @return [Boolean] true when no elements are queued
    def empty?
      @mutex.synchronize { @items.empty? }
    end

    # Append +value+ and wake one thread blocked in +take+.
    def put(value)
      @mutex.synchronize do
        @items.push(value)
        @condition.signal
      end
    end

    # Remove the first occurrence of +value+, if present.
    #
    # @return [Object, nil] the removed element, or nil when absent
    def delete(value)
      @mutex.synchronize { @items.delete(value) }
    end

    # Remove and return the oldest element, blocking while the list is empty.
    def take
      @mutex.synchronize do
        loop do
          break unless @items.empty?
          @condition.wait(@mutex)
        end
        @items.shift
      end
    end

  end
end
require 'thread'
require 'concurrent/thread_pool_executor'
require 'concurrent/processor_count'

module Concurrent

  # Raised when a global pool is reassigned after it has already been set.
  ConfigurationError = Class.new(StandardError)

  class << self
    # The library-wide Configuration instance (created at load time below).
    attr_accessor :configuration
  end

  # Yield the global configuration under a lock so concurrent configure
  # blocks do not interleave.
  #
  # NOTE(review): the lazy `@mutex ||= Mutex.new` is itself unsynchronized —
  # two threads racing the very first `configure` call could each create a
  # mutex. Confirm whether first use is guaranteed single-threaded.
  def self.configure
    (@mutex ||= Mutex.new).synchronize do
      yield(configuration)
    end
  end

  # Holds the two lazily-created global thread pools: a large elastic pool
  # for short tasks and a small fixed-size pool for long operations.
  class Configuration
    # NOTE(review): both attr_accessor lines are shadowed by the explicit
    # reader/writer methods defined below and are effectively dead code.
    attr_accessor :global_task_pool
    attr_accessor :global_operation_pool

    def initialize
      # ||= is redundant in initialize (@cores is always nil here); kept as-is.
      @cores ||= Concurrent::processor_count
    end

    # Lazily create the global task pool: many threads, unbounded queue.
    def global_task_pool
      @global_task_pool ||= Concurrent::ThreadPoolExecutor.new(
        min_threads: [2, @cores].max,
        max_threads: [20, @cores * 15].max,
        idletime: 2 * 60, # 2 minutes
        max_queue: 0, # unlimited
        overflow_policy: :abort # raise an exception
      )
    end

    # Lazily create the global operation pool: one thread per core, bounded queue.
    def global_operation_pool
      @global_operation_pool ||= Concurrent::ThreadPoolExecutor.new(
        min_threads: [2, @cores].max,
        max_threads: [2, @cores].max,
        idletime: 10 * 60, # 10 minutes
        max_queue: [20, @cores * 15].max,
        overflow_policy: :abort # raise an exception
      )
    end

    # Replace the task pool; allowed only before the default has been created.
    def global_task_pool=(executor)
      raise ConfigurationError.new('global task pool was already set') unless @global_task_pool.nil?
      @global_task_pool = executor
    end

    # Replace the operation pool; allowed only before the default has been created.
    def global_operation_pool=(executor)
      raise ConfigurationError.new('global operation pool was already set') unless @global_operation_pool.nil?
      @global_operation_pool = executor
    end
  end

  # Mixin for classes that accept an :executor / :operation / :task option
  # and need to resolve it to a concrete pool.
  module OptionsParser

    # @param [Hash] opts options hash; :executor wins, then :operation/:task
    #   flags choose between the two global pools (task pool is the default)
    def get_executor_from(opts = {})
      if opts[:executor]
        opts[:executor]
      elsif opts[:operation] == true || opts[:task] == false
        Concurrent.configuration.global_operation_pool
      else
        Concurrent.configuration.global_task_pool
      end
    end
  end

  # NOTE(review): module-level `private` does not affect methods defined with
  # `def self.` — finalize_executor remains publicly callable despite this marker.
  private

  # Best-effort shutdown of an executor at process exit: prefer a graceful
  # #shutdown, fall back to #kill, and swallow any error (we are exiting anyway).
  def self.finalize_executor(executor)
    return if executor.nil?
    if executor.respond_to?(:shutdown)
      executor.shutdown
    elsif executor.respond_to?(:kill)
      executor.kill
    end
  rescue
    # suppress
  end

  # create the default configuration on load
  self.configuration = Configuration.new

  # set exit hook to shutdown global thread pools
  # NOTE(review): reading the pools here forces their lazy creation at exit
  # even if they were never used — confirm this is intended.
  at_exit do
    self.finalize_executor(self.configuration.global_task_pool)
    self.finalize_executor(self.configuration.global_operation_pool)
  end
end
data/lib/concurrent/dataflow.rb
CHANGED
@@ -1,5 +1,6 @@
|
|
1
1
|
require 'concurrent/atomic'
|
2
2
|
require 'concurrent/future'
|
3
|
+
require 'concurrent/per_thread_executor'
|
3
4
|
|
4
5
|
module Concurrent
|
5
6
|
|
@@ -60,10 +61,16 @@ module Concurrent
|
|
60
61
|
# @raise [ArgumentError] if no block is given
|
61
62
|
# @raise [ArgumentError] if any of the inputs are not +IVar+s
|
62
63
|
def dataflow(*inputs, &block)
|
64
|
+
dataflow_with(Concurrent.configuration.global_task_pool, *inputs, &block)
|
65
|
+
end
|
66
|
+
module_function :dataflow
|
67
|
+
|
68
|
+
def dataflow_with(executor, *inputs, &block)
|
69
|
+
raise ArgumentError.new('an executor must be provided') if executor.nil?
|
63
70
|
raise ArgumentError.new('no block given') unless block_given?
|
64
71
|
raise ArgumentError.new('not all dependencies are IVars') unless inputs.all? { |input| input.is_a? IVar }
|
65
72
|
|
66
|
-
result = Future.new do
|
73
|
+
result = Future.new(executor: executor) do
|
67
74
|
values = inputs.map { |input| input.value }
|
68
75
|
block.call(*values)
|
69
76
|
end
|
@@ -80,6 +87,5 @@ module Concurrent
|
|
80
87
|
|
81
88
|
result
|
82
89
|
end
|
83
|
-
|
84
|
-
module_function :dataflow
|
90
|
+
module_function :dataflow_with
|
85
91
|
end
|
require 'thread'

module Concurrent

  # Lazy evaluation of a block yielding an immutable result. Useful for expensive
  # operations that may never be needed.
  #
  # A `Delay` is similar to `Future` but solves a different problem.
  # Where a `Future` schedules an operation for immediate execution and
  # performs the operation asynchronously, a `Delay` (as the name implies)
  # delays execution of the operation until the result is actually needed.
  #
  # When a `Delay` is created its state is set to `pending`. The value and
  # reason are both `nil`. The first time the `#value` method is called the
  # enclosed operation will be run and the calling thread will block. Other
  # threads attempting to call `#value` will block as well. Once the operation
  # is complete the *value* will be set to the result of the operation or the
  # *reason* will be set to the raised exception, as appropriate. All threads
  # blocked on `#value` will return. Subsequent calls to `#value` will immediately
  # return the cached value. The operation will only be run once. This means that
  # any side effects created by the operation will only happen once as well.
  #
  # `Delay` includes the `Concurrent::Dereferenceable` mixin to support thread
  # safety of the reference returned by `#value`.
  #
  # @since 0.6.0
  #
  # @see Concurrent::Dereferenceable
  #
  # @see http://clojuredocs.org/clojure_core/clojure.core/delay
  # @see http://aphyr.com/posts/306-clojure-from-the-ground-up-state
  class Delay
    include Obligation

    # Create a new +Delay+ in the +:pending+ state.
    #
    # @yield the delayed operation to perform
    #
    # @param [Hash] opts the options to create a message with
    # @option opts [Boolean] :dup_on_deref (false) call +#dup+ before returning the data
    # @option opts [Boolean] :freeze_on_deref (false) call +#freeze+ before returning the data
    # @option opts [Proc] :copy_on_deref (nil) call the given +Proc+ passing the internal value and
    #   returning the value returned from the proc
    #
    # @raise [ArgumentError] if no block is given
    def initialize(opts = {}, &block)
      raise ArgumentError.new('no block given') unless block_given?

      init_obligation
      @state = :pending
      @task = block
      set_deref_options(opts)
    end

    # Return the (possibly memoized) value of the delayed operation.
    #
    # If the state is +:pending+ then the calling thread will block while the
    # operation is performed. All other threads simultaneously calling +#value+
    # will block as well. Once the operation is complete (either +:fulfilled+ or
    # +:rejected+) all waiting threads will unblock and the new value will be
    # returned.
    #
    # If the state is not +:pending+ when +#value+ is called the (possibly memoized)
    # value will be returned without blocking and without performing the operation
    # again.
    #
    # Regardless of the final disposition all +Dereferenceable+ options set during
    # object construction will be honored.
    #
    # @return [Object] the (possibly memoized) result of the block operation
    #
    # @see Concurrent::Dereferenceable
    def value
      # NOTE(review): the task runs while holding the obligation mutex, so a
      # task that calls back into this Delay's #value would deadlock — confirm
      # callers avoid reentrancy. Also @task is retained after fulfillment,
      # keeping the block (and its captures) alive.
      mutex.synchronize do
        if @state == :pending
          begin
            @value = @task.call
            @state = :fulfilled
          rescue => ex
            # A failed task rejects the Delay; the exception is cached, not re-raised.
            @reason = ex
            @state = :rejected
          end
        end
        return apply_deref_options(@value)
      end
    end
  end
end