concurrent-ruby 0.7.0-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE.txt +21 -0
- data/README.md +217 -0
- data/lib/concurrent.rb +45 -0
- data/lib/concurrent/actor.rb +104 -0
- data/lib/concurrent/actor/behaviour.rb +70 -0
- data/lib/concurrent/actor/behaviour/abstract.rb +48 -0
- data/lib/concurrent/actor/behaviour/awaits.rb +21 -0
- data/lib/concurrent/actor/behaviour/buffer.rb +54 -0
- data/lib/concurrent/actor/behaviour/errors_on_unknown_message.rb +12 -0
- data/lib/concurrent/actor/behaviour/executes_context.rb +18 -0
- data/lib/concurrent/actor/behaviour/linking.rb +42 -0
- data/lib/concurrent/actor/behaviour/pausing.rb +77 -0
- data/lib/concurrent/actor/behaviour/removes_child.rb +16 -0
- data/lib/concurrent/actor/behaviour/sets_results.rb +36 -0
- data/lib/concurrent/actor/behaviour/supervised.rb +58 -0
- data/lib/concurrent/actor/behaviour/supervising.rb +34 -0
- data/lib/concurrent/actor/behaviour/terminates_children.rb +13 -0
- data/lib/concurrent/actor/behaviour/termination.rb +54 -0
- data/lib/concurrent/actor/context.rb +153 -0
- data/lib/concurrent/actor/core.rb +213 -0
- data/lib/concurrent/actor/default_dead_letter_handler.rb +9 -0
- data/lib/concurrent/actor/envelope.rb +41 -0
- data/lib/concurrent/actor/errors.rb +27 -0
- data/lib/concurrent/actor/internal_delegations.rb +49 -0
- data/lib/concurrent/actor/public_delegations.rb +40 -0
- data/lib/concurrent/actor/reference.rb +81 -0
- data/lib/concurrent/actor/root.rb +37 -0
- data/lib/concurrent/actor/type_check.rb +48 -0
- data/lib/concurrent/actor/utils.rb +10 -0
- data/lib/concurrent/actor/utils/ad_hoc.rb +21 -0
- data/lib/concurrent/actor/utils/balancer.rb +40 -0
- data/lib/concurrent/actor/utils/broadcast.rb +52 -0
- data/lib/concurrent/actor/utils/pool.rb +59 -0
- data/lib/concurrent/actress.rb +3 -0
- data/lib/concurrent/agent.rb +230 -0
- data/lib/concurrent/async.rb +284 -0
- data/lib/concurrent/atomic.rb +91 -0
- data/lib/concurrent/atomic/atomic_boolean.rb +202 -0
- data/lib/concurrent/atomic/atomic_fixnum.rb +203 -0
- data/lib/concurrent/atomic/condition.rb +67 -0
- data/lib/concurrent/atomic/copy_on_notify_observer_set.rb +118 -0
- data/lib/concurrent/atomic/copy_on_write_observer_set.rb +117 -0
- data/lib/concurrent/atomic/count_down_latch.rb +116 -0
- data/lib/concurrent/atomic/cyclic_barrier.rb +106 -0
- data/lib/concurrent/atomic/event.rb +98 -0
- data/lib/concurrent/atomic/synchronization.rb +51 -0
- data/lib/concurrent/atomic/thread_local_var.rb +82 -0
- data/lib/concurrent/atomic_reference/concurrent_update_error.rb +8 -0
- data/lib/concurrent/atomic_reference/direct_update.rb +50 -0
- data/lib/concurrent/atomic_reference/jruby.rb +14 -0
- data/lib/concurrent/atomic_reference/mutex_atomic.rb +77 -0
- data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +25 -0
- data/lib/concurrent/atomic_reference/rbx.rb +19 -0
- data/lib/concurrent/atomic_reference/ruby.rb +37 -0
- data/lib/concurrent/atomics.rb +11 -0
- data/lib/concurrent/channel/buffered_channel.rb +85 -0
- data/lib/concurrent/channel/channel.rb +41 -0
- data/lib/concurrent/channel/unbuffered_channel.rb +35 -0
- data/lib/concurrent/channel/waitable_list.rb +40 -0
- data/lib/concurrent/channels.rb +5 -0
- data/lib/concurrent/collection/blocking_ring_buffer.rb +71 -0
- data/lib/concurrent/collection/priority_queue.rb +305 -0
- data/lib/concurrent/collection/ring_buffer.rb +59 -0
- data/lib/concurrent/collections.rb +3 -0
- data/lib/concurrent/configuration.rb +161 -0
- data/lib/concurrent/dataflow.rb +108 -0
- data/lib/concurrent/delay.rb +104 -0
- data/lib/concurrent/dereferenceable.rb +101 -0
- data/lib/concurrent/errors.rb +30 -0
- data/lib/concurrent/exchanger.rb +34 -0
- data/lib/concurrent/executor/cached_thread_pool.rb +44 -0
- data/lib/concurrent/executor/executor.rb +282 -0
- data/lib/concurrent/executor/fixed_thread_pool.rb +33 -0
- data/lib/concurrent/executor/immediate_executor.rb +65 -0
- data/lib/concurrent/executor/java_cached_thread_pool.rb +31 -0
- data/lib/concurrent/executor/java_fixed_thread_pool.rb +41 -0
- data/lib/concurrent/executor/java_single_thread_executor.rb +22 -0
- data/lib/concurrent/executor/java_thread_pool_executor.rb +180 -0
- data/lib/concurrent/executor/per_thread_executor.rb +100 -0
- data/lib/concurrent/executor/ruby_cached_thread_pool.rb +29 -0
- data/lib/concurrent/executor/ruby_fixed_thread_pool.rb +32 -0
- data/lib/concurrent/executor/ruby_single_thread_executor.rb +74 -0
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +288 -0
- data/lib/concurrent/executor/ruby_thread_pool_worker.rb +72 -0
- data/lib/concurrent/executor/safe_task_executor.rb +35 -0
- data/lib/concurrent/executor/serialized_execution.rb +126 -0
- data/lib/concurrent/executor/single_thread_executor.rb +35 -0
- data/lib/concurrent/executor/thread_pool_executor.rb +68 -0
- data/lib/concurrent/executor/timer_set.rb +143 -0
- data/lib/concurrent/executors.rb +9 -0
- data/lib/concurrent/future.rb +125 -0
- data/lib/concurrent/ivar.rb +111 -0
- data/lib/concurrent/lazy_register.rb +58 -0
- data/lib/concurrent/logging.rb +17 -0
- data/lib/concurrent/mvar.rb +200 -0
- data/lib/concurrent/obligation.rb +171 -0
- data/lib/concurrent/observable.rb +40 -0
- data/lib/concurrent/options_parser.rb +48 -0
- data/lib/concurrent/promise.rb +170 -0
- data/lib/concurrent/scheduled_task.rb +79 -0
- data/lib/concurrent/timer_task.rb +341 -0
- data/lib/concurrent/tvar.rb +248 -0
- data/lib/concurrent/utilities.rb +3 -0
- data/lib/concurrent/utility/processor_count.rb +152 -0
- data/lib/concurrent/utility/timeout.rb +35 -0
- data/lib/concurrent/utility/timer.rb +21 -0
- data/lib/concurrent/version.rb +3 -0
- data/lib/concurrent_ruby.rb +1 -0
- data/lib/concurrent_ruby_ext.jar +0 -0
- data/lib/concurrent_ruby_ext.so +0 -0
- data/lib/extension_helper.rb +28 -0
- metadata +163 -0
module Concurrent

  # A non-thread-safe, fixed-capacity circular buffer. Values are inserted
  # at the logical tail (`@last`) and removed from the logical head
  # (`@first`); both indices wrap around the underlying array so slots are
  # reused without shifting elements.
  class RingBuffer

    # Create a new ring buffer.
    #
    # @param [Integer] capacity the maximum number of elements the buffer can hold
    def initialize(capacity)
      @buffer = Array.new(capacity)
      @first = @last = 0
      @count = 0
    end

    # @return [Integer] the capacity of the buffer
    def capacity
      @buffer.size
    end

    # @return [Integer] the number of elements currently in the buffer
    def count
      @count
    end

    # @return [Boolean] true if buffer is empty, false otherwise
    def empty?
      @count == 0
    end

    # @return [Boolean] true if buffer is full, false otherwise
    def full?
      @count == capacity
    end

    # Inserts a value at the tail of the buffer.
    #
    # @param [Object] value the value to insert
    # @return [Boolean] true if value has been inserted, false otherwise
    def offer(value)
      return false if full?

      @buffer[@last] = value
      @last = (@last + 1) % @buffer.size
      @count += 1
      true
    end

    # Removes and returns the first available value.
    #
    # @return [Object, nil] the first available value, removed from the
    #   buffer; nil if the buffer is empty
    def poll
      # BUGFIX: guard the empty case. Without this check @count went
      # negative and @first drifted past @last, corrupting the buffer.
      return nil if empty?

      result = @buffer[@first]
      @buffer[@first] = nil # drop the reference so the value can be GC'd
      @first = (@first + 1) % @buffer.size
      @count -= 1
      result
    end

    # Returns the first available value without removing it.
    #
    # @return [Object, nil] the first available value; nil if the buffer
    #   is empty
    def peek
      @buffer[@first]
    end
  end
end
require 'thread'
require 'concurrent/delay'
require 'concurrent/errors'
require 'concurrent/atomic'
require 'concurrent/executor/immediate_executor'
require 'concurrent/executor/thread_pool_executor'
require 'concurrent/executor/timer_set'
require 'concurrent/utility/processor_count'

module Concurrent
  extend Logging

  # A gem-level configuration object. Holds the lazily-created global thread
  # pools (task, operation, timer) plus the gem-wide logger. The pools are
  # wrapped in `Delay` objects so they are only constructed the first time
  # they are actually requested.
  class Configuration

    # a proc defining how to log messages, its interface has to be:
    #   lambda { |level, progname, message = nil, &block| _ }
    attr_accessor :logger

    # Create a new configuration object.
    #
    # Each global pool is a `Delay` evaluated on an `ImmediateExecutor`, so
    # the pool is built synchronously on the thread that first dereferences it.
    def initialize
      immediate_executor = ImmediateExecutor.new
      @global_task_pool = Delay.new(executor: immediate_executor) { new_task_pool }
      @global_operation_pool = Delay.new(executor: immediate_executor) { new_operation_pool }
      @global_timer_set = Delay.new(executor: immediate_executor) { Concurrent::TimerSet.new }
      @logger = no_logger
    end

    # if assigned to {#logger}, it will log nothing.
    # @return [Proc] a logger proc that discards every message
    def no_logger
      lambda { |level, progname, message = nil, &block| }
    end

    # Global thread pool optimized for short *tasks*.
    #
    # Dereferencing the `Delay` creates the pool on first use.
    #
    # @return [ThreadPoolExecutor] the thread pool
    def global_task_pool
      @global_task_pool.value
    end

    # Global thread pool optimized for long *operations*.
    #
    # @return [ThreadPoolExecutor] the thread pool
    def global_operation_pool
      @global_operation_pool.value
    end

    # Global thread pool optimized for *timers*
    #
    # @return [ThreadPoolExecutor] the thread pool
    #
    # @see Concurrent::timer
    def global_timer_set
      @global_timer_set.value
    end

    # Global thread pool optimized for short *tasks*.
    #
    # A global thread pool must be set as soon as the gem is loaded. Setting a new
    # thread pool once tasks and operations have been post can lead to unpredictable
    # results. The first time a task/operation is post a new thread pool will be
    # created using the default configuration. Once set the thread pool cannot be
    # changed. Thus, explicitly setting the thread pool must occur *before* any
    # tasks/operations are post else an exception will be raised.
    #
    # @param [Executor] executor the executor to be used for this thread pool
    #
    # @return [ThreadPoolExecutor] the new thread pool
    #
    # @raise [Concurrent::ConfigurationError] if this thread pool has already been set
    def global_task_pool=(executor)
      # `Delay#reconfigure` returns false once the pool has been computed;
      # the low-precedence `or` then raises. Do not replace with `||` without
      # re-checking precedence against the assignment.
      @global_task_pool.reconfigure { executor } or
        raise ConfigurationError.new('global task pool was already set')
    end

    # Global thread pool optimized for long *operations*.
    #
    # A global thread pool must be set as soon as the gem is loaded. Setting a new
    # thread pool once tasks and operations have been post can lead to unpredictable
    # results. The first time a task/operation is post a new thread pool will be
    # created using the default configuration. Once set the thread pool cannot be
    # changed. Thus, explicitly setting the thread pool must occur *before* any
    # tasks/operations are post else an exception will be raised.
    #
    # @param [Executor] executor the executor to be used for this thread pool
    #
    # @return [ThreadPoolExecutor] the new thread pool
    #
    # @raise [Concurrent::ConfigurationError] if this thread pool has already been set
    def global_operation_pool=(executor)
      @global_operation_pool.reconfigure { executor } or
        raise ConfigurationError.new('global operation pool was already set')
    end

    # Build the default task pool: many short-lived threads, unbounded queue.
    #
    # @return [ThreadPoolExecutor] a newly constructed pool
    def new_task_pool
      Concurrent::ThreadPoolExecutor.new(
        min_threads: [2, Concurrent.processor_count].max,
        max_threads: [20, Concurrent.processor_count * 15].max,
        idletime: 2 * 60, # 2 minutes
        max_queue: 0, # unlimited
        overflow_policy: :abort # raise an exception
      )
    end

    # Build the default operation pool: few long-lived threads, bounded queue.
    #
    # @return [ThreadPoolExecutor] a newly constructed pool
    def new_operation_pool
      Concurrent::ThreadPoolExecutor.new(
        min_threads: [2, Concurrent.processor_count].max,
        max_threads: [2, Concurrent.processor_count].max,
        idletime: 10 * 60, # 10 minutes
        max_queue: [20, Concurrent.processor_count * 15].max,
        overflow_policy: :abort # raise an exception
      )
    end
  end

  # create the default configuration on load; wrapped in an Atomic so reads
  # from multiple threads see a consistent reference
  @configuration = Atomic.new Configuration.new

  # @return [Configuration] the current gem-level configuration
  def self.configuration
    @configuration.value
  end

  # Perform gem-level configuration.
  #
  # @yield the configuration commands
  # @yieldparam [Configuration] the current configuration object
  def self.configure
    yield(configuration)
  end

  private

  # NOTE(review): `private` at module level does not affect methods defined
  # with `def self.` — `finalize_executor` remains publicly callable.

  # Attempt to properly shutdown the given executor using the `shutdown` or
  # `kill` method when available.
  #
  # @param [Executor] executor the executor to shutdown
  #
  # @return [Boolean] `true` if the executor is successfully shut down or `nil`, else `false`
  def self.finalize_executor(executor)
    return true if executor.nil?
    if executor.respond_to?(:shutdown)
      executor.shutdown
    elsif executor.respond_to?(:kill)
      executor.kill
    end
    true
  rescue => ex
    # best-effort shutdown: log and report failure rather than raising at exit
    log DEBUG, ex
    false
  end


  # set exit hook to shutdown global thread pools
  at_exit do
    self.finalize_executor(self.configuration.global_timer_set)
    self.finalize_executor(self.configuration.global_task_pool)
    self.finalize_executor(self.configuration.global_operation_pool)
    # TODO may break other test suites using concurrent-ruby, terminates before test is run
  end
end
require 'concurrent/future'
require 'concurrent/atomic/atomic_fixnum'
require 'concurrent/executor/per_thread_executor'

module Concurrent

  # Counts down outstanding dependencies and fires its callback exactly once,
  # when the final dependency reports completion.
  #
  # @!visibility private
  class DependencyCounter # :nodoc:

    # @param [Integer] count number of dependency notifications to wait for
    # @yield invoked once after the last dependency completes
    def initialize(count, &block)
      @counter = AtomicFixnum.new(count)
      @block = block
    end

    # Observer callback; each completed dependency calls this once.
    def update(time, value, reason)
      @block.call if @counter.decrement == 0
    end
  end

  # Dataflow allows you to create a task that will be scheduled when all of
  # its data dependencies are available. Data dependencies are `Future`
  # values. The dataflow task itself is also a `Future` value, so graphs of
  # such tasks can be composed: each node runs once every input it depends
  # on has completed.
  #
  # The syntax is related to Akka's `flow` and Habanero Java's
  # `DataDrivenFuture`, but unlike Akka no task is scheduled until it is
  # ready to run, and unlike Habanero Java the resolved input values are
  # passed into the block rather than being dereferenced again inside it.
  #
  # In dataflow terminology this implementation is coarse-grained (a task
  # may be many instructions) and dynamic (tasks may create further tasks).
  #
  # @example Parallel Fibonacci calculator
  #   def fib(n)
  #     if n < 2
  #       Concurrent::dataflow { n }
  #     else
  #       n1 = fib(n - 1)
  #       n2 = fib(n - 2)
  #       Concurrent::dataflow(n1, n2) { |v1, v2| v1 + v2 }
  #     end
  #   end
  #
  #   f = fib(14) #=> #<Concurrent::Future:0x000001019a26d8 ...
  #   f.value(1)  #=> 377  (waits up to 1 second)
  #
  # @param [Future] inputs zero or more `Future` operations that this dataflow depends upon
  #
  # @yield The operation to perform once all the dependencies are met
  # @yieldparam [Future] inputs each of the `Future` inputs to the dataflow
  # @yieldreturn [Object] the result of the block operation
  #
  # @return [Object] the result of all the operations
  #
  # @raise [ArgumentError] if no block is given
  # @raise [ArgumentError] if any of the inputs are not `IVar`s
  def dataflow(*inputs, &block)
    dataflow_with(Concurrent.configuration.global_operation_pool, *inputs, &block)
  end
  module_function :dataflow

  # Same as {#dataflow} but runs on an explicitly supplied executor.
  def dataflow_with(executor, *inputs, &block)
    call_dataflow(:value, executor, *inputs, &block)
  end
  module_function :dataflow_with

  # Same as {#dataflow} but dereferences inputs with `#value!`, propagating
  # any input failure.
  #
  # NOTE(review): this variant dispatches to the global *task* pool while
  # {#dataflow} uses the *operation* pool — presumably intentional, but
  # worth confirming.
  def dataflow!(*inputs, &block)
    dataflow_with!(Concurrent.configuration.global_task_pool, *inputs, &block)
  end
  module_function :dataflow!

  # Same as {#dataflow_with} but dereferences inputs with `#value!`.
  def dataflow_with!(executor, *inputs, &block)
    call_dataflow(:value!, executor, *inputs, &block)
  end
  module_function :dataflow_with!

  private

  # Shared implementation: builds the result `Future` and wires a
  # `DependencyCounter` observer onto every input so the future executes
  # only after all inputs complete.
  def call_dataflow(method, executor, *inputs, &block)
    raise ArgumentError.new('an executor must be provided') if executor.nil?
    raise ArgumentError.new('no block given') unless block_given?
    unless inputs.all? { |input| input.is_a? IVar }
      raise ArgumentError.new('not all dependencies are IVars')
    end

    result = Future.new(executor: executor) do
      # dereference every input (via :value or :value!) then run the task
      block.call(*inputs.map { |input| input.send(method) })
    end

    if inputs.empty?
      # no dependencies: eligible to run immediately
      result.execute
    else
      counter = DependencyCounter.new(inputs.size) { result.execute }
      inputs.each { |input| input.add_observer counter }
    end

    result
  end
  module_function :call_dataflow
end
require 'thread'
require 'concurrent/obligation'
require 'concurrent/options_parser'

module Concurrent

  # Lazy evaluation of a block yielding an immutable result. Useful for expensive
  # operations that may never be needed.
  #
  # A `Delay` is similar to `Future` but solves a different problem.
  # Where a `Future` schedules an operation for immediate execution and
  # performs the operation asynchronously, a `Delay` (as the name implies)
  # delays execution of the operation until the result is actually needed.
  #
  # When a `Delay` is created its state is set to `pending`. The value and
  # reason are both `nil`. The first time the `#value` method is called the
  # enclosed operation will be run and the calling thread will block. Other
  # threads attempting to call `#value` will block as well. Once the operation
  # is complete the *value* will be set to the result of the operation or the
  # *reason* will be set to the raised exception, as appropriate. All threads
  # blocked on `#value` will return. Subsequent calls to `#value` will immediately
  # return the cached value. The operation will only be run once. This means that
  # any side effects created by the operation will only happen once as well.
  #
  # `Delay` includes the `Concurrent::Dereferenceable` mixin to support thread
  # safety of the reference returned by `#value`.
  #
  # @since 0.6.0
  #
  # @see Concurrent::Dereferenceable
  #
  # @see http://clojuredocs.org/clojure_core/clojure.core/delay
  # @see http://aphyr.com/posts/306-clojure-from-the-ground-up-state
  class Delay
    include Obligation

    # Create a new `Delay` in the `:pending` state.
    #
    # @yield the delayed operation to perform
    #
    # @param [Hash] opts the options to create a message with
    # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
    # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
    # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
    #   returning the value returned from the proc
    #
    # @raise [ArgumentError] if no block is given
    def initialize(opts = {}, &block)
      raise ArgumentError.new('no block given') unless block_given?

      init_obligation
      @state = :pending
      @task = block
      set_deref_options(opts)
      # executor the task will eventually be posted to (see #execute_task_once)
      @task_executor = OptionsParser.get_task_executor_from(opts)
      # latch ensuring the task is posted at most once
      @computing = false
    end

    # Block until the value is computed, triggering the computation on first call.
    #
    # @param [Numeric] timeout maximum seconds to wait (passed through to Obligation)
    def wait(timeout)
      execute_task_once
      super timeout
    end

    # reconfigures the block returning the value if still #incomplete?
    # @yield the delayed operation to perform
    # @return [true, false] if success
    def reconfigure(&block)
      mutex.lock
      # NOTE: raised while holding the lock; `ensure` below releases it
      raise ArgumentError.new('no block given') unless block_given?
      unless @computing
        @task = block
        true
      else
        false
      end
    ensure
      mutex.unlock
    end

    private

    # Post the task to the executor exactly once. The first caller flips
    # @computing under the mutex and wins the right to post; all later
    # callers see @computing == true and fall through to waiting.
    def execute_task_once
      mutex.lock
      # `execute` is truthy only for the single winning caller
      execute = @computing = true unless @computing
      task = @task
      mutex.unlock

      if execute
        @task_executor.post do
          begin
            result = task.call
            success = true
          rescue => ex
            # on failure `success` stays nil (falsy) and `reason` records the error
            reason = ex
          end
          # publish the outcome and wake all waiters, under the lock
          mutex.lock
          set_state success, result, reason
          event.set
          mutex.unlock
        end
      end
    end
  end
end
module Concurrent

  # Object references in Ruby are mutable. This can lead to serious problems when
  # the `#value` of a concurrent object is a mutable reference. Which is always the
  # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type.
  # Most classes in this library that expose a `#value` getter method do so using
  # this mixin module.
  module Dereferenceable

    # Return the value this object represents, transformed according to the
    # options given to `#set_deref_options`.
    #
    # When multiple deref options are set they are applied in a fixed order:
    # `:copy_on_deref`, then `:dup_on_deref`, then `:freeze_on_deref`.
    # Because of this ordering there is no need to `#freeze` an object created
    # by a provided `:copy_on_deref` block — simply set `:freeze_on_deref` to
    # `true`. Combining `:dup_on_deref` and `:freeze_on_deref` is as close to
    # "pure" functional semantics as Ruby gets.
    #
    # This method is thread-safe and synchronized with the internal `#mutex`.
    #
    # @return [Object] the current value of the object
    def value
      mutex.synchronize { apply_deref_options(@value) }
    end

    alias_method :deref, :value

    protected

    # Set the internal value of this object, under the lock.
    #
    # @param [Object] val the new value
    def value=(val)
      mutex.synchronize { @value = val }
    end

    # A mutex lock used for synchronizing thread-safe operations. Methods defined
    # by `Dereferenceable` are synchronized using the `Mutex` returned from this
    # method. Operations performed by the including class that operate on the
    # `@value` instance variable should be locked with this `Mutex`.
    #
    # @return [Mutex] the synchronization object
    def mutex
      @mutex
    end

    # Initializes the internal `Mutex`.
    #
    # @note This method *must* be called from within the constructor of the including class.
    #
    # @see #mutex
    def init_mutex
      @mutex = Mutex.new
    end

    # Set the options which define the operations #value performs before
    # returning data to the caller (dereferencing).
    #
    # @note Most classes that include this module will call `#set_deref_options`
    #   from within the constructor, thus allowing these options to be set at
    #   object creation.
    #
    # @param [Hash] opts the options defining dereference behavior.
    # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
    # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
    # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
    #   returning the value returned from the proc
    def set_deref_options(opts = {})
      mutex.synchronize do
        # short aliases (:dup, :freeze, :copy) are accepted alongside long names
        @dup_on_deref = opts[:dup_on_deref] || opts[:dup]
        @freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze]
        @copy_on_deref = opts[:copy_on_deref] || opts[:copy]
        # cache "no transformation needed" so #value can fast-path
        @do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref)
        nil
      end
    end

    # Apply the configured deref transformations, in copy/dup/freeze order.
    #
    # @!visibility private
    def apply_deref_options(value) # :nodoc:
      return nil if value.nil?
      return value if @do_nothing_on_deref

      result = value
      result = @copy_on_deref.call(result) if @copy_on_deref
      result = result.dup if @dup_on_deref
      result = result.freeze if @freeze_on_deref
      result
    end
  end
end