concurrent-ruby 0.7.0.rc0-x64-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +15 -0
- data/LICENSE.txt +21 -0
- data/README.md +166 -0
- data/ext/concurrent_ruby_ext/atomic_reference.c +78 -0
- data/ext/concurrent_ruby_ext/atomic_reference.h +12 -0
- data/ext/concurrent_ruby_ext/extconf.rb +59 -0
- data/ext/concurrent_ruby_ext/rb_concurrent.c +28 -0
- data/lib/2.0/concurrent_ruby_ext.so +0 -0
- data/lib/concurrent.rb +45 -0
- data/lib/concurrent/actress.rb +221 -0
- data/lib/concurrent/actress/ad_hoc.rb +20 -0
- data/lib/concurrent/actress/context.rb +98 -0
- data/lib/concurrent/actress/core.rb +228 -0
- data/lib/concurrent/actress/core_delegations.rb +42 -0
- data/lib/concurrent/actress/envelope.rb +41 -0
- data/lib/concurrent/actress/errors.rb +14 -0
- data/lib/concurrent/actress/reference.rb +64 -0
- data/lib/concurrent/actress/type_check.rb +48 -0
- data/lib/concurrent/agent.rb +232 -0
- data/lib/concurrent/async.rb +319 -0
- data/lib/concurrent/atomic.rb +46 -0
- data/lib/concurrent/atomic/atomic_boolean.rb +157 -0
- data/lib/concurrent/atomic/atomic_fixnum.rb +162 -0
- data/lib/concurrent/atomic/condition.rb +67 -0
- data/lib/concurrent/atomic/copy_on_notify_observer_set.rb +118 -0
- data/lib/concurrent/atomic/copy_on_write_observer_set.rb +117 -0
- data/lib/concurrent/atomic/count_down_latch.rb +116 -0
- data/lib/concurrent/atomic/cyclic_barrier.rb +106 -0
- data/lib/concurrent/atomic/event.rb +98 -0
- data/lib/concurrent/atomic/thread_local_var.rb +117 -0
- data/lib/concurrent/atomic_reference/concurrent_update_error.rb +7 -0
- data/lib/concurrent/atomic_reference/delegated_update.rb +28 -0
- data/lib/concurrent/atomic_reference/direct_update.rb +28 -0
- data/lib/concurrent/atomic_reference/jruby.rb +8 -0
- data/lib/concurrent/atomic_reference/mutex_atomic.rb +47 -0
- data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +24 -0
- data/lib/concurrent/atomic_reference/rbx.rb +16 -0
- data/lib/concurrent/atomic_reference/ruby.rb +16 -0
- data/lib/concurrent/atomics.rb +10 -0
- data/lib/concurrent/channel/buffered_channel.rb +85 -0
- data/lib/concurrent/channel/channel.rb +41 -0
- data/lib/concurrent/channel/unbuffered_channel.rb +34 -0
- data/lib/concurrent/channel/waitable_list.rb +40 -0
- data/lib/concurrent/channels.rb +5 -0
- data/lib/concurrent/collection/blocking_ring_buffer.rb +71 -0
- data/lib/concurrent/collection/priority_queue.rb +305 -0
- data/lib/concurrent/collection/ring_buffer.rb +59 -0
- data/lib/concurrent/collections.rb +3 -0
- data/lib/concurrent/configuration.rb +158 -0
- data/lib/concurrent/dataflow.rb +91 -0
- data/lib/concurrent/delay.rb +112 -0
- data/lib/concurrent/dereferenceable.rb +101 -0
- data/lib/concurrent/errors.rb +30 -0
- data/lib/concurrent/exchanger.rb +34 -0
- data/lib/concurrent/executor/cached_thread_pool.rb +44 -0
- data/lib/concurrent/executor/executor.rb +229 -0
- data/lib/concurrent/executor/fixed_thread_pool.rb +33 -0
- data/lib/concurrent/executor/immediate_executor.rb +16 -0
- data/lib/concurrent/executor/java_cached_thread_pool.rb +31 -0
- data/lib/concurrent/executor/java_fixed_thread_pool.rb +33 -0
- data/lib/concurrent/executor/java_single_thread_executor.rb +21 -0
- data/lib/concurrent/executor/java_thread_pool_executor.rb +187 -0
- data/lib/concurrent/executor/per_thread_executor.rb +24 -0
- data/lib/concurrent/executor/ruby_cached_thread_pool.rb +29 -0
- data/lib/concurrent/executor/ruby_fixed_thread_pool.rb +32 -0
- data/lib/concurrent/executor/ruby_single_thread_executor.rb +73 -0
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +286 -0
- data/lib/concurrent/executor/ruby_thread_pool_worker.rb +72 -0
- data/lib/concurrent/executor/safe_task_executor.rb +35 -0
- data/lib/concurrent/executor/serialized_execution.rb +90 -0
- data/lib/concurrent/executor/single_thread_executor.rb +35 -0
- data/lib/concurrent/executor/thread_pool_executor.rb +68 -0
- data/lib/concurrent/executor/timer_set.rb +143 -0
- data/lib/concurrent/executors.rb +9 -0
- data/lib/concurrent/future.rb +124 -0
- data/lib/concurrent/ivar.rb +111 -0
- data/lib/concurrent/logging.rb +17 -0
- data/lib/concurrent/mvar.rb +200 -0
- data/lib/concurrent/obligation.rb +171 -0
- data/lib/concurrent/observable.rb +40 -0
- data/lib/concurrent/options_parser.rb +46 -0
- data/lib/concurrent/promise.rb +169 -0
- data/lib/concurrent/scheduled_task.rb +78 -0
- data/lib/concurrent/supervisor.rb +343 -0
- data/lib/concurrent/timer_task.rb +341 -0
- data/lib/concurrent/tvar.rb +252 -0
- data/lib/concurrent/utilities.rb +3 -0
- data/lib/concurrent/utility/processor_count.rb +150 -0
- data/lib/concurrent/utility/timeout.rb +35 -0
- data/lib/concurrent/utility/timer.rb +21 -0
- data/lib/concurrent/version.rb +3 -0
- data/lib/concurrent_ruby.rb +1 -0
- data/lib/concurrent_ruby_ext.so +0 -0
- data/lib/extension_helper.rb +9 -0
- metadata +141 -0
@@ -0,0 +1,35 @@
|
|
1
|
+
require 'concurrent/executor/ruby_single_thread_executor'

module Concurrent

  if RUBY_PLATFORM == 'java'

    require 'concurrent/executor/java_single_thread_executor'

    # @!macro [attach] single_thread_executor
    #
    # An executor service which runs all operations serially on a single
    # thread, in the order in which they are post. See the linked Java
    # `SingleThreadExecutor` documentation for the reference semantics.
    #
    # NOTE(review): the original comment here described "a thread pool with
    # a set number of threads"; it appears to have been copied from
    # `FixedThreadPool` and did not match this class.
    #
    # The API and behavior of this class are based on Java's `SingleThreadExecutor`
    #
    # @note When running on the JVM (JRuby) this class will inherit from `JavaSingleThreadExecutor`.
    #   On all other platforms it will inherit from `RubySingleThreadExecutor`.
    #
    # @see Concurrent::RubySingleThreadExecutor
    # @see Concurrent::JavaSingleThreadExecutor
    #
    # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
    # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
    # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
    class SingleThreadExecutor < JavaSingleThreadExecutor
    end
  else
    # @!macro single_thread_executor
    class SingleThreadExecutor < RubySingleThreadExecutor
    end
  end
end
|
@@ -0,0 +1,68 @@
|
|
1
|
+
require 'concurrent/executor/ruby_thread_pool_executor'

module Concurrent

  if RUBY_PLATFORM == 'java'
    require 'concurrent/executor/java_thread_pool_executor'
    # @!macro [attach] thread_pool_executor
    #
    # An abstraction composed of one or more threads and a task queue. Tasks
    # (blocks or `proc` objects) are submitted to the pool and added to the queue.
    # The threads in the pool remove the tasks and execute them in the order
    # they were received. When there are more tasks queued than there are
    # threads to execute them the pool will create new threads, up to the
    # configured maximum. Similarly, threads that are idle for too long will
    # be garbage collected, down to the configured minimum options. Should a
    # thread crash it, too, will be garbage collected.
    #
    # `ThreadPoolExecutor` is based on the Java class of the same name. From
    # the official Java documentation:
    #
    # > Thread pools address two different problems: they usually provide
    # > improved performance when executing large numbers of asynchronous tasks,
    # > due to reduced per-task invocation overhead, and they provide a means
    # > of bounding and managing the resources, including threads, consumed
    # > when executing a collection of tasks. Each ThreadPoolExecutor also
    # > maintains some basic statistics, such as the number of completed tasks.
    # >
    # > To be useful across a wide range of contexts, this class provides many
    # > adjustable parameters and extensibility hooks. However, programmers are
    # > urged to use the more convenient Executors factory methods
    # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation),
    # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single
    # > background thread), that preconfigure settings for the most common usage
    # > scenarios.
    #
    # Thread pools support several configuration options:
    #
    # * `max_threads`: The maximum number of threads that may be created in the pool.
    # * `min_threads`: The minimum number of threads that may be retained in the pool.
    # * `idletime`: The number of seconds that a thread may be idle before being reclaimed.
    # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at
    #   any one time. When the queue size reaches `max_queue` subsequent tasks will be
    #   rejected in accordance with the configured `overflow_policy`.
    # * `overflow_policy`: The policy defining how rejected tasks are handled.
    #
    # Three overflow policies are supported:
    #
    # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task.
    # * `:discard`: Silently discard the task and return `nil` as the task result.
    # * `:caller_runs`: Execute the task on the calling thread.
    #
    # @note When running on the JVM (JRuby) this class will inherit from `JavaThreadPoolExecutor`.
    #   On all other platforms it will inherit from `RubyThreadPoolExecutor`.
    #
    # @see Concurrent::RubyThreadPoolExecutor
    # @see Concurrent::JavaThreadPoolExecutor
    #
    # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
    # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
    # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
    class ThreadPoolExecutor < JavaThreadPoolExecutor
    end
  else
    # @!macro thread_pool_executor
    class ThreadPoolExecutor < RubyThreadPoolExecutor
    end
  end
end
|
@@ -0,0 +1,143 @@
|
|
1
|
+
require 'thread'
require_relative 'executor'
require 'concurrent/options_parser'
require 'concurrent/atomic/event'
require 'concurrent/collection/priority_queue'
require 'concurrent/executor/single_thread_executor'

module Concurrent

  # Executes a collection of tasks at the specified times. A master thread
  # monitors the set and schedules each task for execution at the appropriate
  # time. Tasks are run on the global task pool or on the supplied executor.
  class TimerSet
    include RubyExecutor

    # Create a new set of timed tasks.
    #
    # @param [Hash] opts the options controlling how the future will be processed
    # @option opts [Boolean] :operation (false) when `true` will execute the future on the global
    #   operation pool (for long-running operations), when `false` will execute the future on the
    #   global task pool (for short-running tasks)
    # @option opts [object] :executor when provided will run all operations on
    #   this executor rather than the global thread pool (overrides :operation)
    def initialize(opts = {})
      @queue = PriorityQueue.new(order: :min)           # pending tasks, soonest first
      @task_executor = OptionsParser::get_executor_from(opts)
      @timer_executor = SingleThreadExecutor.new        # runs the scheduling loop
      @condition = Condition.new                        # sleeps until the next task is due
      init_executor
    end

    # Post a task to be execute at the specified time. The given time may be either
    # a `Time` object or the number of seconds to wait. If the intended execution
    # time is within 1/100th of a second of the current time the task will be
    # immediately post to the executor.
    #
    # @param [Object] intended_time the time to schedule the task for execution
    #
    # @yield the task to be performed
    #
    # @return [Boolean] true if the message is post, false after shutdown
    #
    # @raise [ArgumentError] if the intended execution time is not in the future
    # @raise [ArgumentError] if no block is given
    def post(intended_time, *args, &task)
      # Validate the block before computing the schedule time, so a caller
      # who forgets the block gets the specific "no block given" error even
      # when the time argument is also invalid.
      raise ArgumentError.new('no block given') unless block_given?
      time = TimerSet.calculate_schedule_time(intended_time).to_f

      mutex.synchronize do
        return false unless running?

        if (time - Time.now.to_f) <= 0.01
          # Effectively "now": skip the queue and hand straight to the executor.
          @task_executor.post(*args, &task)
        else
          @queue.push(Task.new(time, args, task))
          @timer_executor.post(&method(:process_tasks))
        end

        true
      end
    end

    # For a timer, #kill is like an orderly shutdown, except we need to manually
    # (and destructively) clear the queue first
    def kill
      @queue.clear
      shutdown
    end

    # Calculate an Epoch time with milliseconds at which to execute a
    # task. If the given time is a `Time` object it will be converted
    # accordingly. If the time is a non-negative numeric value it will
    # be understood as a number of seconds in the future and will be
    # added to the current time to calculate Epoch.
    #
    # @param [Object] intended_time the time (as a `Time` object or an integer)
    #   to schedule the task for execution
    # @param [Time] now (Time.now) the time from which to calculate an interval
    #
    # @return [Time] the intended execution time
    #
    # @raise [ArgumentError] if the intended execution time is not in the future
    def self.calculate_schedule_time(intended_time, now = Time.now)
      if intended_time.is_a?(Time)
        raise ArgumentError.new('schedule time must be in the future') if intended_time <= now
        intended_time
      else
        # Zero is accepted (meaning "run immediately"); only negative
        # intervals are rejected, and the message reflects that.
        raise ArgumentError.new('seconds must be greater than or equal to zero') if intended_time.to_f < 0.0
        now + intended_time
      end
    end

    private

    # A struct for encapsulating a task and its intended execution time.
    # It facilitates proper prioritization by overriding the comparison
    # (spaceship) operator as a comparison of the intended execution
    # times.
    #
    # @!visibility private
    Task = Struct.new(:time, :args, :op) do
      include Comparable

      def <=>(other)
        self.time <=> other.time
      end
    end

    private_constant :Task

    # @!visibility private
    def shutdown_execution
      @queue.clear
      @timer_executor.kill
      stopped_event.set
    end

    # Run a loop and execute tasks in the scheduled order and at the approximate
    # scheduled time. If no tasks remain the thread will exit gracefully so that
    # garbage collection can occur. If there are no ready tasks it will sleep
    # for up to 60 seconds waiting for the next scheduled task.
    #
    # NOTE(review): `@queue` is read (`empty?`/`peek`/`pop`) here on the timer
    # thread without holding `mutex`, while `#post` pushes under `mutex` —
    # verify `PriorityQueue` is safe for this access pattern.
    #
    # @!visibility private
    def process_tasks
      loop do
        break if @queue.empty?

        task = @queue.peek
        interval = task.time - Time.now.to_f

        if interval <= 0
          @task_executor.post(*task.args, &task.op)
          @queue.pop
        else
          # Sleep until the next task is due (capped at 60s so new, sooner
          # tasks posted in the meantime are picked up reasonably quickly).
          mutex.synchronize do
            @condition.wait(mutex, [interval, 60].min)
          end
        end
      end
    end
  end
end
|
@@ -0,0 +1,9 @@
|
|
1
|
+
# Convenience loader: pulls in every executor implementation shipped with
# concurrent-ruby so users can simply `require 'concurrent/executors'`.
require 'concurrent/executor/cached_thread_pool'
require 'concurrent/executor/fixed_thread_pool'
require 'concurrent/executor/immediate_executor'
require 'concurrent/executor/per_thread_executor'
require 'concurrent/executor/safe_task_executor'
require 'concurrent/executor/single_thread_executor'
require 'concurrent/executor/thread_pool_executor'
require 'concurrent/executor/timer_set'
require 'concurrent/executor/serialized_execution'
|
@@ -0,0 +1,124 @@
|
|
1
|
+
require 'thread'

require 'concurrent/options_parser'
require 'concurrent/executor/safe_task_executor'

module Concurrent

  # A `Future` represents a promise to complete an action at some time in the future.
  # The action is atomic and permanent. The idea behind a future is to send an operation
  # for asynchronous completion, do other stuff, then return and retrieve the result
  # of the async operation at a later time.
  #
  # A `Future` has four possible states: *:unscheduled*, *:pending*, *:rejected*, or *:fulfilled*.
  # When a `Future` is created its state is set to *:unscheduled*. Once the `#execute` method is
  # called the state becomes *:pending* and will remain in that state until processing is
  # complete. A completed `Future` is either *:rejected*, indicating that an exception was
  # thrown during processing, or *:fulfilled*, indicating success. If a `Future` is *:fulfilled*
  # its `value` will be updated to reflect the result of the operation. If *:rejected* the
  # `reason` will be updated with a reference to the thrown exception. The predicate methods
  # `#unscheduled?`, `#pending?`, `#rejected?`, and `fulfilled?` can be called at any time to
  # obtain the state of the `Future`, as can the `#state` method, which returns a symbol.
  #
  # Retrieving the value of a `Future` is done through the `#value` (alias: `#deref`) method.
  # Obtaining the value of a `Future` is a potentially blocking operation. When a `Future` is
  # *:rejected* a call to `#value` will return `nil` immediately. When a `Future` is
  # *:fulfilled* a call to `#value` will immediately return the current value. When a
  # `Future` is *:pending* a call to `#value` will block until the `Future` is either
  # *:rejected* or *:fulfilled*. A *timeout* value can be passed to `#value` to limit how
  # long the call will block. If `nil` the call will block indefinitely. If `0` the call will
  # not block. Any other integer or float value will indicate the maximum number of seconds to block.
  #
  # The `Future` class also includes the behavior of the Ruby standard library `Observable` module,
  # but does so in a thread-safe way. On fulfillment or rejection all observers will be notified
  # according to the normal `Observable` behavior. The observer callback function will be called
  # with three parameters: the `Time` of fulfillment/rejection, the final `value`, and the final
  # `reason`. Observers added after fulfillment/rejection will still be notified as normal.
  #
  # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module
  # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function
  # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future
  class Future < IVar

    # Create a new `Future` in the `:unscheduled` state.
    #
    # @yield the asynchronous operation to perform
    #
    # @param [Hash] opts the options controlling how the future will be processed
    # @option opts [Boolean] :operation (false) when `true` will execute the future on the global
    #   operation pool (for long-running operations), when `false` will execute the future on the
    #   global task pool (for short-running tasks)
    # @option opts [object] :executor when provided will run all operations on
    #   this executor rather than the global thread pool (overrides :operation)
    # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
    # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
    # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
    #   returning the value returned from the proc
    #
    # @raise [ArgumentError] if no block is given
    def initialize(opts = {}, &block)
      raise ArgumentError.new('no block given') unless block_given?
      # IVar#initialize would call #set when given a real value; pass
      # NO_VALUE so the future stays incomplete until #execute runs.
      super(IVar::NO_VALUE, opts)
      @state = :unscheduled
      @task = block
      @executor = OptionsParser::get_executor_from(opts)
    end

    # Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and
    # passes the block to a new thread/thread pool for eventual execution.
    # Does nothing if the `Future` is in any state other than `:unscheduled`.
    #
    # @return [Future] a reference to `self` when the state transition succeeds;
    #   note that `nil` is returned when the future was NOT in the
    #   `:unscheduled` state (the `if` below falls through with no value)
    #
    # @example Instance and execute in separate steps
    #   future = Concurrent::Future.new{ sleep(1); 42 }
    #   future.state #=> :unscheduled
    #   future.execute
    #   future.state #=> :pending
    #
    # @example Instance and execute in one line
    #   future = Concurrent::Future.new{ sleep(1); 42 }.execute
    #   future.state #=> :pending
    #
    # @since 0.5.0
    def execute
      # Atomic check-and-transition guarantees the task is posted at most once
      # even when #execute is called concurrently from multiple threads.
      if compare_and_set_state(:pending, :unscheduled)
        @executor.post{ work }
        self
      end
    end

    # Create a new `Future` object with the given block, execute it, and return the
    # `:pending` object.
    #
    # @yield the asynchronous operation to perform
    #
    # @param [Hash] opts the options controlling how the future will be processed
    # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
    # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
    # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
    #   returning the value returned from the proc
    #
    # @return [Future] the newly created `Future` in the `:pending` state
    #
    # @raise [ArgumentError] if no block is given
    #
    # @example
    #   future = Concurrent::Future.execute{ sleep(1); 42 }
    #   future.state #=> :pending
    #
    # @since 0.5.0
    def self.execute(opts = {}, &block)
      Future.new(opts, &block).execute
    end

    # Completion is driven exclusively by #work; hide the inherited IVar
    # mutators so callers cannot complete the future from outside.
    protected :set, :fail, :complete

    private

    # Run the stored task, capturing success/value/reason, and complete the
    # future with the outcome (fulfilled on success, rejected on error).
    #
    # @!visibility private
    def work # :nodoc:
      success, val, reason = SafeTaskExecutor.new(@task).execute
      complete(success, val, reason)
    end
  end
end
|
@@ -0,0 +1,111 @@
|
|
1
|
+
require 'thread'

require 'concurrent/errors'
require 'concurrent/obligation'
require 'concurrent/observable'

module Concurrent

  # An `IVar` is a single-element container that is normally created empty, and
  # can only be set once. The I in `IVar` stands for immutable. Reading an `IVar`
  # normally blocks until it is set. It is safe to set and read an `IVar` from
  # different threads.
  #
  # If you want to have some parallel task set the value in an `IVar`, you want
  # a `Future`. If you want to create a graph of parallel tasks all executed when
  # the values they depend on are ready you want `dataflow`. `IVar` is generally
  # a low-level primitive.
  #
  # @example Create, set and get an `IVar`
  #   ivar = Concurrent::IVar.new
  #   ivar.set 14
  #   ivar.get #=> 14
  #   ivar.set 2 # would now be an error
  class IVar

    include Obligation
    include Observable

    # Sentinel distinguishing "no initial value given" from an explicit nil.
    # @!visibility private
    NO_VALUE = Object.new # :nodoc:

    # Create a new `IVar`. With no initial value the `IVar` starts in the
    # `:pending` state; when an initial value is given, `#set` is called
    # immediately and the `IVar` is created already fulfilled.
    #
    # @param [Object] value the initial value
    # @param [Hash] opts the options to create a message with
    # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
    # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
    # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
    #   returning the value returned from the proc
    def initialize(value = NO_VALUE, opts = {})
      init_obligation
      self.observers = CopyOnWriteObserverSet.new
      set_deref_options(opts)

      if value == NO_VALUE
        @state = :pending
      else
        set(value)
      end
    end

    # Add an observer on this object that will receive notification on update.
    #
    # Upon completion the `IVar` will notify all observers in a thread-safe way. The `func`
    # method of the observer will be called with three arguments: the `Time` at which the
    # `Future` completed the asynchronous operation, the final `value` (or `nil` on rejection),
    # and the final `reason` (or `nil` on fulfillment).
    #
    # @param [Object] observer the object that will be notified of changes
    # @param [Symbol] func symbol naming the method to call when this `Observable` has changes`
    def add_observer(observer = nil, func = :update, &block)
      raise ArgumentError.new('cannot provide both an observer and a block') if observer && block
      direct_notification = false

      # A block may be given instead of an observer object; wrap it so it is
      # invoked via #call like any other observer.
      if block
        observer = block
        func = :call
      end

      mutex.synchronize do
        if event.set?
          # Already completed: notify this observer immediately (outside the
          # lock, below) instead of registering it.
          direct_notification = true
        else
          observers.add_observer(observer, func)
        end
      end

      observer.send(func, Time.now, self.value, reason) if direct_notification
      observer
    end

    # Set the `IVar` to a value and wake or notify all threads waiting on it.
    #
    # @param [Object] value the value to store in the `IVar`
    # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already been set or otherwise completed
    def set(value)
      complete(true, value, nil)
    end

    # Set the `IVar` to failed due to some error and wake or notify all threads waiting on it.
    #
    # @param [Object] reason for the failure
    # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already been set or otherwise completed
    def fail(reason = StandardError.new)
      complete(false, nil, reason)
    end

    # Transition to the completed state (fulfilled or rejected), record the
    # value/reason, wake waiters, then notify observers.
    #
    # @!visibility private
    def complete(success, value, reason) # :nodoc:
      mutex.synchronize do
        raise MultipleAssignmentError.new('multiple assignment') if [:fulfilled, :rejected].include? @state
        set_state(success, value, reason)
        event.set
      end

      # Observer callbacks run outside the lock so a slow observer cannot
      # block readers; the time is captured once for all observers.
      time = Time.now
      observers.notify_and_delete_observers{ [time, self.value, reason] }
      self
    end
  end
end
|