concurrent-ruby 0.7.0-java

Files changed (112)
  1. data/LICENSE.txt +21 -0
  2. data/README.md +217 -0
  3. data/lib/concurrent.rb +45 -0
  4. data/lib/concurrent/actor.rb +104 -0
  5. data/lib/concurrent/actor/behaviour.rb +70 -0
  6. data/lib/concurrent/actor/behaviour/abstract.rb +48 -0
  7. data/lib/concurrent/actor/behaviour/awaits.rb +21 -0
  8. data/lib/concurrent/actor/behaviour/buffer.rb +54 -0
  9. data/lib/concurrent/actor/behaviour/errors_on_unknown_message.rb +12 -0
  10. data/lib/concurrent/actor/behaviour/executes_context.rb +18 -0
  11. data/lib/concurrent/actor/behaviour/linking.rb +42 -0
  12. data/lib/concurrent/actor/behaviour/pausing.rb +77 -0
  13. data/lib/concurrent/actor/behaviour/removes_child.rb +16 -0
  14. data/lib/concurrent/actor/behaviour/sets_results.rb +36 -0
  15. data/lib/concurrent/actor/behaviour/supervised.rb +58 -0
  16. data/lib/concurrent/actor/behaviour/supervising.rb +34 -0
  17. data/lib/concurrent/actor/behaviour/terminates_children.rb +13 -0
  18. data/lib/concurrent/actor/behaviour/termination.rb +54 -0
  19. data/lib/concurrent/actor/context.rb +153 -0
  20. data/lib/concurrent/actor/core.rb +213 -0
  21. data/lib/concurrent/actor/default_dead_letter_handler.rb +9 -0
  22. data/lib/concurrent/actor/envelope.rb +41 -0
  23. data/lib/concurrent/actor/errors.rb +27 -0
  24. data/lib/concurrent/actor/internal_delegations.rb +49 -0
  25. data/lib/concurrent/actor/public_delegations.rb +40 -0
  26. data/lib/concurrent/actor/reference.rb +81 -0
  27. data/lib/concurrent/actor/root.rb +37 -0
  28. data/lib/concurrent/actor/type_check.rb +48 -0
  29. data/lib/concurrent/actor/utils.rb +10 -0
  30. data/lib/concurrent/actor/utils/ad_hoc.rb +21 -0
  31. data/lib/concurrent/actor/utils/balancer.rb +40 -0
  32. data/lib/concurrent/actor/utils/broadcast.rb +52 -0
  33. data/lib/concurrent/actor/utils/pool.rb +59 -0
  34. data/lib/concurrent/actress.rb +3 -0
  35. data/lib/concurrent/agent.rb +230 -0
  36. data/lib/concurrent/async.rb +284 -0
  37. data/lib/concurrent/atomic.rb +91 -0
  38. data/lib/concurrent/atomic/atomic_boolean.rb +202 -0
  39. data/lib/concurrent/atomic/atomic_fixnum.rb +203 -0
  40. data/lib/concurrent/atomic/condition.rb +67 -0
  41. data/lib/concurrent/atomic/copy_on_notify_observer_set.rb +118 -0
  42. data/lib/concurrent/atomic/copy_on_write_observer_set.rb +117 -0
  43. data/lib/concurrent/atomic/count_down_latch.rb +116 -0
  44. data/lib/concurrent/atomic/cyclic_barrier.rb +106 -0
  45. data/lib/concurrent/atomic/event.rb +98 -0
  46. data/lib/concurrent/atomic/synchronization.rb +51 -0
  47. data/lib/concurrent/atomic/thread_local_var.rb +82 -0
  48. data/lib/concurrent/atomic_reference/concurrent_update_error.rb +8 -0
  49. data/lib/concurrent/atomic_reference/direct_update.rb +50 -0
  50. data/lib/concurrent/atomic_reference/jruby.rb +14 -0
  51. data/lib/concurrent/atomic_reference/mutex_atomic.rb +77 -0
  52. data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +25 -0
  53. data/lib/concurrent/atomic_reference/rbx.rb +19 -0
  54. data/lib/concurrent/atomic_reference/ruby.rb +37 -0
  55. data/lib/concurrent/atomics.rb +11 -0
  56. data/lib/concurrent/channel/buffered_channel.rb +85 -0
  57. data/lib/concurrent/channel/channel.rb +41 -0
  58. data/lib/concurrent/channel/unbuffered_channel.rb +35 -0
  59. data/lib/concurrent/channel/waitable_list.rb +40 -0
  60. data/lib/concurrent/channels.rb +5 -0
  61. data/lib/concurrent/collection/blocking_ring_buffer.rb +71 -0
  62. data/lib/concurrent/collection/priority_queue.rb +305 -0
  63. data/lib/concurrent/collection/ring_buffer.rb +59 -0
  64. data/lib/concurrent/collections.rb +3 -0
  65. data/lib/concurrent/configuration.rb +161 -0
  66. data/lib/concurrent/dataflow.rb +108 -0
  67. data/lib/concurrent/delay.rb +104 -0
  68. data/lib/concurrent/dereferenceable.rb +101 -0
  69. data/lib/concurrent/errors.rb +30 -0
  70. data/lib/concurrent/exchanger.rb +34 -0
  71. data/lib/concurrent/executor/cached_thread_pool.rb +44 -0
  72. data/lib/concurrent/executor/executor.rb +282 -0
  73. data/lib/concurrent/executor/fixed_thread_pool.rb +33 -0
  74. data/lib/concurrent/executor/immediate_executor.rb +65 -0
  75. data/lib/concurrent/executor/java_cached_thread_pool.rb +31 -0
  76. data/lib/concurrent/executor/java_fixed_thread_pool.rb +41 -0
  77. data/lib/concurrent/executor/java_single_thread_executor.rb +22 -0
  78. data/lib/concurrent/executor/java_thread_pool_executor.rb +180 -0
  79. data/lib/concurrent/executor/per_thread_executor.rb +100 -0
  80. data/lib/concurrent/executor/ruby_cached_thread_pool.rb +29 -0
  81. data/lib/concurrent/executor/ruby_fixed_thread_pool.rb +32 -0
  82. data/lib/concurrent/executor/ruby_single_thread_executor.rb +74 -0
  83. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +288 -0
  84. data/lib/concurrent/executor/ruby_thread_pool_worker.rb +72 -0
  85. data/lib/concurrent/executor/safe_task_executor.rb +35 -0
  86. data/lib/concurrent/executor/serialized_execution.rb +126 -0
  87. data/lib/concurrent/executor/single_thread_executor.rb +35 -0
  88. data/lib/concurrent/executor/thread_pool_executor.rb +68 -0
  89. data/lib/concurrent/executor/timer_set.rb +143 -0
  90. data/lib/concurrent/executors.rb +9 -0
  91. data/lib/concurrent/future.rb +125 -0
  92. data/lib/concurrent/ivar.rb +111 -0
  93. data/lib/concurrent/lazy_register.rb +58 -0
  94. data/lib/concurrent/logging.rb +17 -0
  95. data/lib/concurrent/mvar.rb +200 -0
  96. data/lib/concurrent/obligation.rb +171 -0
  97. data/lib/concurrent/observable.rb +40 -0
  98. data/lib/concurrent/options_parser.rb +48 -0
  99. data/lib/concurrent/promise.rb +170 -0
  100. data/lib/concurrent/scheduled_task.rb +79 -0
  101. data/lib/concurrent/timer_task.rb +341 -0
  102. data/lib/concurrent/tvar.rb +248 -0
  103. data/lib/concurrent/utilities.rb +3 -0
  104. data/lib/concurrent/utility/processor_count.rb +152 -0
  105. data/lib/concurrent/utility/timeout.rb +35 -0
  106. data/lib/concurrent/utility/timer.rb +21 -0
  107. data/lib/concurrent/version.rb +3 -0
  108. data/lib/concurrent_ruby.rb +1 -0
  109. data/lib/concurrent_ruby_ext.jar +0 -0
  110. data/lib/concurrent_ruby_ext.so +0 -0
  111. data/lib/extension_helper.rb +28 -0
  112. metadata +163 -0
data/lib/concurrent/executor/single_thread_executor.rb
@@ -0,0 +1,35 @@
+ require 'concurrent/executor/ruby_single_thread_executor'
+
+ module Concurrent
+
+   if RUBY_PLATFORM == 'java'
+
+     require 'concurrent/executor/java_single_thread_executor'
+
+     # @!macro [attach] single_thread_executor
+     #
+     # A thread pool with a set number of threads. The number of threads in the pool
+     # is set on construction and remains constant. When all threads are busy, new
+     # tasks sent via `#post` are enqueued until a thread becomes available.
+     # Should a thread crash for any reason it will immediately be removed
+     # from the pool and replaced.
+     #
+     # The API and behavior of this class are based on Java's `SingleThreadExecutor`.
+     #
+     # @note When running on the JVM (JRuby) this class will inherit from `JavaSingleThreadExecutor`.
+     #   On all other platforms it will inherit from `RubySingleThreadExecutor`.
+     #
+     # @see Concurrent::RubySingleThreadExecutor
+     # @see Concurrent::JavaSingleThreadExecutor
+     #
+     # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
+     # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
+     # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
+     class SingleThreadExecutor < JavaSingleThreadExecutor
+     end
+   else
+     # @!macro single_thread_executor
+     class SingleThreadExecutor < RubySingleThreadExecutor
+     end
+   end
+ end
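For readers skimming the diff, a minimal usage sketch of the executor defined above, assuming the shared executor API (`#post`, `#shutdown`, `#wait_for_termination`) provided by this gem's common executor mixin:

    require 'concurrent'

    executor = Concurrent::SingleThreadExecutor.new
    executor.post { puts 'runs on the single worker thread' }
    executor.post(1, 2) { |a, b| puts a + b }   # extra arguments are forwarded to the block
    executor.shutdown                           # stop accepting new tasks
    executor.wait_for_termination               # block until queued tasks have finished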
data/lib/concurrent/executor/thread_pool_executor.rb
@@ -0,0 +1,68 @@
+ require 'concurrent/executor/ruby_thread_pool_executor'
+
+ module Concurrent
+
+   if RUBY_PLATFORM == 'java'
+     require 'concurrent/executor/java_thread_pool_executor'
+     # @!macro [attach] thread_pool_executor
+     #
+     # An abstraction composed of one or more threads and a task queue. Tasks
+     # (blocks or `proc` objects) are submitted to the pool and added to the queue.
+     # The threads in the pool remove the tasks and execute them in the order
+     # they were received. When there are more tasks queued than there are
+     # threads to execute them the pool will create new threads, up to the
+     # configured maximum. Similarly, threads that are idle for too long will
+     # be garbage collected, down to the configured minimum. Should a
+     # thread crash, it, too, will be garbage collected.
+     #
+     # `ThreadPoolExecutor` is based on the Java class of the same name. From
+     # the official Java documentation:
+     #
+     # > Thread pools address two different problems: they usually provide
+     # > improved performance when executing large numbers of asynchronous tasks,
+     # > due to reduced per-task invocation overhead, and they provide a means
+     # > of bounding and managing the resources, including threads, consumed
+     # > when executing a collection of tasks. Each ThreadPoolExecutor also
+     # > maintains some basic statistics, such as the number of completed tasks.
+     # >
+     # > To be useful across a wide range of contexts, this class provides many
+     # > adjustable parameters and extensibility hooks. However, programmers are
+     # > urged to use the more convenient Executors factory methods
+     # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation),
+     # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single
+     # > background thread), that preconfigure settings for the most common usage
+     # > scenarios.
+     #
+     # Thread pools support several configuration options:
+     #
+     # * `max_threads`: The maximum number of threads that may be created in the pool.
+     # * `min_threads`: The minimum number of threads that may be retained in the pool.
+     # * `idletime`: The number of seconds that a thread may be idle before being reclaimed.
+     # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at
+     #   any one time. When the queue size reaches `max_queue` subsequent tasks will be
+     #   rejected in accordance with the configured `overflow_policy`.
+     # * `overflow_policy`: The policy defining how rejected tasks are handled.
+     #
+     # Three overflow policies are supported:
+     #
+     # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task.
+     # * `:discard`: Silently discard the task and return `nil` as the task result.
+     # * `:caller_runs`: Execute the task on the calling thread.
+     #
+     # @note When running on the JVM (JRuby) this class will inherit from `JavaThreadPoolExecutor`.
+     #   On all other platforms it will inherit from `RubyThreadPoolExecutor`.
+     #
+     # @see Concurrent::RubyThreadPoolExecutor
+     # @see Concurrent::JavaThreadPoolExecutor
+     #
+     # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
+     # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
+     # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
+     class ThreadPoolExecutor < JavaThreadPoolExecutor
+     end
+   else
+     # @!macro thread_pool_executor
+     class ThreadPoolExecutor < RubyThreadPoolExecutor
+     end
+   end
+ end
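A short sketch of the configuration options documented above; the values are illustrative, not the gem's defaults, and `do_work` is a placeholder for the caller's task:

    require 'concurrent'

    pool = Concurrent::ThreadPoolExecutor.new(
      min_threads:     2,      # threads retained even when idle
      max_threads:     10,     # upper bound on pool growth
      idletime:        60,     # seconds an idle thread may live
      max_queue:       100,    # queued tasks beyond this trigger the overflow policy
      overflow_policy: :caller_runs
    )
    pool.post { do_work }
    pool.shutdown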
data/lib/concurrent/executor/timer_set.rb
@@ -0,0 +1,143 @@
+ require 'thread'
+ require_relative 'executor'
+ require 'concurrent/options_parser'
+ require 'concurrent/atomic/event'
+ require 'concurrent/collection/priority_queue'
+ require 'concurrent/executor/single_thread_executor'
+
+ module Concurrent
+
+   # Executes a collection of tasks at the specified times. A master thread
+   # monitors the set and schedules each task for execution at the appropriate
+   # time. Tasks are run on the global task pool or on the supplied executor.
+   class TimerSet
+     include RubyExecutor
+
+     # Create a new set of timed tasks.
+     #
+     # @param [Hash] opts the options controlling how the tasks will be processed
+     # @option opts [Boolean] :operation (false) when `true` will execute the tasks on the global
+     #   operation pool (for long-running operations), when `false` will execute the tasks on the
+     #   global task pool (for short-running tasks)
+     # @option opts [object] :executor when provided will run all operations on
+     #   this executor rather than the global thread pool (overrides :operation)
+     def initialize(opts = {})
+       @queue = PriorityQueue.new(order: :min)
+       @task_executor = OptionsParser::get_executor_from(opts) || Concurrent.configuration.global_task_pool
+       @timer_executor = SingleThreadExecutor.new
+       @condition = Condition.new
+       init_executor
+     end
+
+     # Post a task to be executed at the specified time. The given time may be either
+     # a `Time` object or the number of seconds to wait. If the intended execution
+     # time is within 1/100th of a second of the current time the task will be
+     # immediately posted to the executor.
+     #
+     # @param [Object] intended_time the time to schedule the task for execution
+     #
+     # @yield the task to be performed
+     #
+     # @return [Boolean] true if the task was posted, false after shutdown
+     #
+     # @raise [ArgumentError] if the intended execution time is not in the future
+     # @raise [ArgumentError] if no block is given
+     def post(intended_time, *args, &task)
+       time = TimerSet.calculate_schedule_time(intended_time).to_f
+       raise ArgumentError.new('no block given') unless block_given?
+
+       mutex.synchronize do
+         return false unless running?
+
+         if (time - Time.now.to_f) <= 0.01
+           @task_executor.post(*args, &task)
+         else
+           @queue.push(Task.new(time, args, task))
+           @timer_executor.post(&method(:process_tasks))
+         end
+
+         true
+       end
+
+     end
+
+     # For a timer, #kill is like an orderly shutdown, except we need to manually
+     # (and destructively) clear the queue first.
+     def kill
+       @queue.clear
+       shutdown
+     end
+
+     # Calculate an Epoch time with milliseconds at which to execute a
+     # task. If the given time is a `Time` object it will be converted
+     # accordingly. If the time is an integer value greater than zero
+     # it will be understood as a number of seconds in the future and
+     # will be added to the current time to calculate the Epoch value.
+     #
+     # @param [Object] intended_time the time (as a `Time` object or an integer)
+     #   to schedule the task for execution
+     # @param [Time] now (Time.now) the time from which to calculate an interval
+     #
+     # @return [Fixnum] the intended time as seconds/millis from Epoch
+     #
+     # @raise [ArgumentError] if the intended execution time is not in the future
+     def self.calculate_schedule_time(intended_time, now = Time.now)
+       if intended_time.is_a?(Time)
+         raise ArgumentError.new('schedule time must be in the future') if intended_time <= now
+         intended_time
+       else
+         raise ArgumentError.new('seconds must be greater than zero') if intended_time.to_f < 0.0
+         now + intended_time
+       end
+     end
+
+     private
+
+     # A struct for encapsulating a task and its intended execution time.
+     # It facilitates proper prioritization by overriding the comparison
+     # (spaceship) operator as a comparison of the intended execution
+     # times.
+     #
+     # @!visibility private
+     Task = Struct.new(:time, :args, :op) do
+       include Comparable
+
+       def <=>(other)
+         self.time <=> other.time
+       end
+     end
+
+     private_constant :Task
+
+     # @!visibility private
+     def shutdown_execution
+       @queue.clear
+       @timer_executor.kill
+       stopped_event.set
+     end
+
+     # Run a loop and execute tasks in the scheduled order and at the approximate
+     # scheduled time. If no tasks remain the thread will exit gracefully so that
+     # garbage collection can occur. If there are no ready tasks it will sleep
+     # for up to 60 seconds waiting for the next scheduled task.
+     #
+     # @!visibility private
+     def process_tasks
+       loop do
+         break if @queue.empty?
+
+         task = @queue.peek
+         interval = task.time - Time.now.to_f
+
+         if interval <= 0
+           @task_executor.post(*task.args, &task.op)
+           @queue.pop
+         else
+           mutex.synchronize do
+             @condition.wait(mutex, [interval, 60].min)
+           end
+         end
+       end
+     end
+   end
+ end
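A sketch of the `TimerSet#post` contract documented above, scheduling by delay and by `Time`; the argument forwarding and shutdown behavior follow the docs in this hunk:

    require 'concurrent'

    timers = Concurrent::TimerSet.new
    timers.post(2) { puts 'runs roughly two seconds from now' }
    timers.post(Time.now + 10, :ping) { |msg| puts msg }   # extra arguments reach the task
    timers.post(0) { puts 'within 1/100th of a second, so posted immediately' }
    timers.shutdown    # orderly shutdown; #kill would also clear pending tasks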
data/lib/concurrent/executors.rb
@@ -0,0 +1,9 @@
+ require 'concurrent/executor/cached_thread_pool'
+ require 'concurrent/executor/fixed_thread_pool'
+ require 'concurrent/executor/immediate_executor'
+ require 'concurrent/executor/per_thread_executor'
+ require 'concurrent/executor/safe_task_executor'
+ require 'concurrent/executor/single_thread_executor'
+ require 'concurrent/executor/thread_pool_executor'
+ require 'concurrent/executor/timer_set'
+ require 'concurrent/executor/serialized_execution'
data/lib/concurrent/future.rb
@@ -0,0 +1,125 @@
+ require 'thread'
+
+ require 'concurrent/options_parser'
+ require 'concurrent/ivar'
+ require 'concurrent/executor/safe_task_executor'
+
+ module Concurrent
+
+   # A `Future` represents a promise to complete an action at some time in the future.
+   # The action is atomic and permanent. The idea behind a future is to send an operation
+   # for asynchronous completion, do other stuff, then return and retrieve the result
+   # of the async operation at a later time.
+   #
+   # A `Future` has four possible states: *:unscheduled*, *:pending*, *:rejected*, or *:fulfilled*.
+   # When a `Future` is created its state is set to *:unscheduled*. Once the `#execute` method is
+   # called the state becomes *:pending* and will remain in that state until processing is
+   # complete. A completed `Future` is either *:rejected*, indicating that an exception was
+   # thrown during processing, or *:fulfilled*, indicating success. If a `Future` is *:fulfilled*
+   # its `value` will be updated to reflect the result of the operation. If *:rejected* the
+   # `reason` will be updated with a reference to the thrown exception. The predicate methods
+   # `#unscheduled?`, `#pending?`, `#rejected?`, and `#fulfilled?` can be called at any time to
+   # obtain the state of the `Future`, as can the `#state` method, which returns a symbol.
+   #
+   # Retrieving the value of a `Future` is done through the `#value` (alias: `#deref`) method.
+   # Obtaining the value of a `Future` is a potentially blocking operation. When a `Future` is
+   # *:rejected* a call to `#value` will return `nil` immediately. When a `Future` is
+   # *:fulfilled* a call to `#value` will immediately return the current value. When a
+   # `Future` is *:pending* a call to `#value` will block until the `Future` is either
+   # *:rejected* or *:fulfilled*. A *timeout* value can be passed to `#value` to limit how
+   # long the call will block. If `nil` the call will block indefinitely. If `0` the call will
+   # not block. Any other integer or float value will indicate the maximum number of seconds to block.
+   #
+   # The `Future` class also includes the behavior of the Ruby standard library `Observable` module,
+   # but does so in a thread-safe way. On fulfillment or rejection all observers will be notified
+   # according to the normal `Observable` behavior. The observer callback function will be called
+   # with three parameters: the `Time` of fulfillment/rejection, the final `value`, and the final
+   # `reason`. Observers added after fulfillment/rejection will still be notified as normal.
+   #
+   # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module
+   # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function
+   # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future
+   class Future < IVar
+
+     # Create a new `Future` in the `:unscheduled` state.
+     #
+     # @yield the asynchronous operation to perform
+     #
+     # @param [Hash] opts the options controlling how the future will be processed
+     # @option opts [Boolean] :operation (false) when `true` will execute the future on the global
+     #   operation pool (for long-running operations), when `false` will execute the future on the
+     #   global task pool (for short-running tasks)
+     # @option opts [object] :executor when provided will run all operations on
+     #   this executor rather than the global thread pool (overrides :operation)
+     # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
+     # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
+     # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
+     #   returning the value returned from the proc
+     #
+     # @raise [ArgumentError] if no block is given
+     def initialize(opts = {}, &block)
+       raise ArgumentError.new('no block given') unless block_given?
+       super(IVar::NO_VALUE, opts)
+       @state = :unscheduled
+       @task = block
+       @executor = OptionsParser::get_executor_from(opts) || Concurrent.configuration.global_operation_pool
+     end
+
+     # Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and
+     # passes the block to a new thread/thread pool for eventual execution.
+     # Does nothing if the `Future` is in any state other than `:unscheduled`.
+     #
+     # @return [Future] a reference to `self`
+     #
+     # @example Instantiate and execute in separate steps
+     #   future = Concurrent::Future.new{ sleep(1); 42 }
+     #   future.state #=> :unscheduled
+     #   future.execute
+     #   future.state #=> :pending
+     #
+     # @example Instantiate and execute in one line
+     #   future = Concurrent::Future.new{ sleep(1); 42 }.execute
+     #   future.state #=> :pending
+     #
+     # @since 0.5.0
+     def execute
+       if compare_and_set_state(:pending, :unscheduled)
+         @executor.post{ work }
+         self
+       end
+     end
+
+     # Create a new `Future` object with the given block, execute it, and return the
+     # `:pending` object.
+     #
+     # @yield the asynchronous operation to perform
+     #
+     # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
+     # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
+     # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
+     #   returning the value returned from the proc
+     #
+     # @return [Future] the newly created `Future` in the `:pending` state
+     #
+     # @raise [ArgumentError] if no block is given
+     #
+     # @example
+     #   future = Concurrent::Future.execute{ sleep(1); 42 }
+     #   future.state #=> :pending
+     #
+     # @since 0.5.0
+     def self.execute(opts = {}, &block)
+       Future.new(opts, &block).execute
+     end
+
+     protected :set, :fail, :complete
+
+     private
+
+     # @!visibility private
+     def work # :nodoc:
+       success, val, reason = SafeTaskExecutor.new(@task).execute
+       complete(success, val, reason)
+     end
+   end
+ end
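A sketch of the `Future` lifecycle described above, using only the API documented in this hunk (`.execute`, `#state`, `#value` with a timeout, and the observer callback inherited from `IVar`):

    require 'concurrent'

    future = Concurrent::Future.execute { sleep(1); 6 * 7 }
    future.state       #=> :pending
    future.value(0)    #=> nil, a zero timeout never blocks while still :pending
    future.value       #=> 42, blocks until the future is fulfilled or rejected
    future.add_observer do |time, value, reason|
      # called once with the completion Time, the final value, and the reason (nil on success)
    end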
data/lib/concurrent/ivar.rb
@@ -0,0 +1,111 @@
+ require 'thread'
+
+ require 'concurrent/errors'
+ require 'concurrent/obligation'
+ require 'concurrent/observable'
+
+ module Concurrent
+
+   # An `IVar` is a single-element container that is normally created empty, and
+   # can only be set once. The I in `IVar` stands for immutable. Reading an `IVar`
+   # normally blocks until it is set. It is safe to set and read an `IVar` from
+   # different threads.
+   #
+   # If you want to have some parallel task set the value in an `IVar`, you want
+   # a `Future`. If you want to create a graph of parallel tasks all executed when
+   # the values they depend on are ready you want `dataflow`. `IVar` is generally
+   # a low-level primitive.
+   #
+   # @example Create, set and get an `IVar`
+   #   ivar = Concurrent::IVar.new
+   #   ivar.set 14
+   #   ivar.get #=> 14
+   #   ivar.set 2 # would now be an error
+   class IVar
+
+     include Obligation
+     include Observable
+
+     # @!visibility private
+     NO_VALUE = Object.new # :nodoc:
+
+     # Create a new `IVar` in the `:pending` state with the (optional) initial value.
+     #
+     # @param [Object] value the initial value
+     # @param [Hash] opts the options controlling how the value will be processed
+     # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
+     # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
+     # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
+     #   returning the value returned from the proc
+     def initialize(value = NO_VALUE, opts = {})
+       init_obligation
+       self.observers = CopyOnWriteObserverSet.new
+       set_deref_options(opts)
+
+       if value == NO_VALUE
+         @state = :pending
+       else
+         set(value)
+       end
+     end
+
+     # Add an observer on this object that will receive notification on update.
+     #
+     # Upon completion the `IVar` will notify all observers in a thread-safe way. The `func`
+     # method of the observer will be called with three arguments: the `Time` at which the
+     # `IVar` completed the asynchronous operation, the final `value` (or `nil` on rejection),
+     # and the final `reason` (or `nil` on fulfillment).
+     #
+     # @param [Object] observer the object that will be notified of changes
+     # @param [Symbol] func symbol naming the method to call when this `Observable` has changes
+     def add_observer(observer = nil, func = :update, &block)
+       raise ArgumentError.new('cannot provide both an observer and a block') if observer && block
+       direct_notification = false
+
+       if block
+         observer = block
+         func = :call
+       end
+
+       mutex.synchronize do
+         if event.set?
+           direct_notification = true
+         else
+           observers.add_observer(observer, func)
+         end
+       end
+
+       observer.send(func, Time.now, self.value, reason) if direct_notification
+       observer
+     end
+
+     # Set the `IVar` to a value and wake or notify all threads waiting on it.
+     #
+     # @param [Object] value the value to store in the `IVar`
+     # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already been set or otherwise completed
+     def set(value)
+       complete(true, value, nil)
+     end
+
+     # Mark the `IVar` as failed due to some error and wake or notify all threads waiting on it.
+     #
+     # @param [Object] reason for the failure
+     # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already been set or otherwise completed
+     def fail(reason = StandardError.new)
+       complete(false, nil, reason)
+     end
+
+     # @!visibility private
+     def complete(success, value, reason) # :nodoc:
+       mutex.synchronize do
+         raise MultipleAssignmentError.new('multiple assignment') if [:fulfilled, :rejected].include? @state
+         set_state(success, value, reason)
+         event.set
+       end
+
+       time = Time.now
+       observers.notify_and_delete_observers{ [time, self.value, reason] }
+       self
+     end
+   end
+ end
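And a sketch of setting an `IVar` from one thread and reading it from another, assuming the `Obligation` mixin included above supplies the blocking `#value(timeout)` reader and the `#fulfilled?` predicate:

    require 'concurrent'

    ivar = Concurrent::IVar.new
    Thread.new { ivar.set(42) }   # any thread may set it, but only once
    ivar.value(5)                 # blocks up to five seconds waiting for the value
    ivar.fulfilled?               #=> true once set; #fail and #reason cover the error path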